Merged common/unused files with regular runner and small bug fixes
Change-Id: I039522788bb4b171111ef904f519e73af8a1f56e
This commit is contained in:
parent
8c234e2f3f
commit
02a97f1c2a
@ -11,9 +11,13 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from __future__ import print_function
|
||||||
|
from traceback import print_exc
|
||||||
|
from warnings import warn
|
||||||
import argparse
|
import argparse
|
||||||
import os
|
import os
|
||||||
from warnings import warn
|
import sys
|
||||||
|
|
||||||
from cafe.common.reporting.cclogging import \
|
from cafe.common.reporting.cclogging import \
|
||||||
get_object_namespace, getLogger, setup_new_cchandler, log_info_block
|
get_object_namespace, getLogger, setup_new_cchandler, log_info_block
|
||||||
from cafe.common.reporting.metrics import \
|
from cafe.common.reporting.metrics import \
|
||||||
@ -185,3 +189,27 @@ def print_mug(name, brewing_from):
|
|||||||
print(border)
|
print(border)
|
||||||
print(mug)
|
print(mug)
|
||||||
print(border)
|
print(border)
|
||||||
|
|
||||||
|
|
||||||
|
def print_exception(file_=None, method=None, value=None, exception=None):
|
||||||
|
"""
|
||||||
|
Prints exceptions in a standard format to stderr.
|
||||||
|
"""
|
||||||
|
print("{0}".format("=" * 70), file=sys.stderr)
|
||||||
|
if file_:
|
||||||
|
print("{0}:".format(file_), file=sys.stderr, end=" ")
|
||||||
|
if method:
|
||||||
|
print("{0}:".format(method), file=sys.stderr, end=" ")
|
||||||
|
if value:
|
||||||
|
print("{0}:".format(value), file=sys.stderr, end=" ")
|
||||||
|
if exception:
|
||||||
|
print("{0}:".format(exception), file=sys.stderr, end=" ")
|
||||||
|
print("\n{0}".format("-" * 70), file=sys.stderr)
|
||||||
|
if exception is not None:
|
||||||
|
print_exc(file=sys.stderr)
|
||||||
|
print(file=sys.stderr)
|
||||||
|
|
||||||
|
|
||||||
|
def get_error(exception=None):
|
||||||
|
"""Gets errno from exception or returns one"""
|
||||||
|
return getattr(exception, "errno", 1)
|
||||||
|
@ -17,10 +17,11 @@ import argparse
|
|||||||
import errno
|
import errno
|
||||||
import importlib
|
import importlib
|
||||||
import os
|
import os
|
||||||
|
import re
|
||||||
import sys
|
import sys
|
||||||
|
|
||||||
from cafe.configurator.managers import EngineConfigManager
|
from cafe.configurator.managers import EngineConfigManager
|
||||||
from cafe.drivers.unittest.common import print_exception, get_error
|
from cafe.drivers.base import print_exception, get_error
|
||||||
from cafe.engine.config import EngineConfig
|
from cafe.engine.config import EngineConfig
|
||||||
|
|
||||||
|
|
||||||
@ -140,6 +141,22 @@ class TagAction(argparse.Action):
|
|||||||
setattr(namespace, self.dest, values)
|
setattr(namespace, self.dest, values)
|
||||||
|
|
||||||
|
|
||||||
|
class RegexAction(argparse.Action):
|
||||||
|
"""
|
||||||
|
Processes regex option.
|
||||||
|
"""
|
||||||
|
def __call__(self, parser, namespace, values, option_string=None):
|
||||||
|
regex_list = []
|
||||||
|
for regex in values:
|
||||||
|
try:
|
||||||
|
regex_list.append(re.compile(regex))
|
||||||
|
except re.error as exception:
|
||||||
|
parser.error(
|
||||||
|
"RegexAction: Invalid regex {0} reason: {1}".format(
|
||||||
|
regex, exception))
|
||||||
|
setattr(namespace, self.dest, regex_list)
|
||||||
|
|
||||||
|
|
||||||
class VerboseAction(argparse.Action):
|
class VerboseAction(argparse.Action):
|
||||||
"""
|
"""
|
||||||
Custom action that sets VERBOSE environment variable.
|
Custom action that sets VERBOSE environment variable.
|
||||||
@ -158,7 +175,7 @@ class ArgumentParser(argparse.ArgumentParser):
|
|||||||
usage_string = """
|
usage_string = """
|
||||||
cafe-runner <config> <testrepos>... [--fail-fast]
|
cafe-runner <config> <testrepos>... [--fail-fast]
|
||||||
[--supress-load-tests] [--dry-run]
|
[--supress-load-tests] [--dry-run]
|
||||||
[--data-directory=DATA_DIRECTORY] [--dotpath-regex=REGEX...]
|
[--data-directory=DATA_DIRECTORY] [--regex-list=REGEX...]
|
||||||
[--file] [--parallel=(class|test)] [--result=(json|xml)]
|
[--file] [--parallel=(class|test)] [--result=(json|xml)]
|
||||||
[--result-directory=RESULT_DIRECTORY] [--tags=TAG...]
|
[--result-directory=RESULT_DIRECTORY] [--tags=TAG...]
|
||||||
[--verbose=VERBOSE]
|
[--verbose=VERBOSE]
|
||||||
@ -222,11 +239,15 @@ class ArgumentParser(argparse.ArgumentParser):
|
|||||||
help="Data directory override")
|
help="Data directory override")
|
||||||
|
|
||||||
self.add_argument(
|
self.add_argument(
|
||||||
"--dotpath-regex", "-d",
|
"--regex-list", "-d",
|
||||||
|
action=RegexAction,
|
||||||
nargs="+",
|
nargs="+",
|
||||||
default=[],
|
default=[],
|
||||||
metavar="REGEX",
|
metavar="REGEX",
|
||||||
help="Package Filter")
|
help="Filter by regex against dotpath down to test level"
|
||||||
|
"Example: tests.repo.cafe_tests.NoDataGenerator.test_fail"
|
||||||
|
"Example: 'NoDataGenerator\.*fail'"
|
||||||
|
"Takes in a list and matches on any")
|
||||||
|
|
||||||
self.add_argument(
|
self.add_argument(
|
||||||
"--file", "-F",
|
"--file", "-F",
|
@ -12,20 +12,21 @@
|
|||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
from itertools import product
|
from itertools import product
|
||||||
import json
|
|
||||||
from string import ascii_letters, digits
|
from string import ascii_letters, digits
|
||||||
|
import json
|
||||||
|
|
||||||
ALLOWED_FIRST_CHAR = "_{0}".format(ascii_letters)
|
ALLOWED_FIRST_CHAR = "_{0}".format(ascii_letters)
|
||||||
ALLOWED_OTHER_CHARS = "{0}{1}".format(ALLOWED_FIRST_CHAR, digits)
|
ALLOWED_OTHER_CHARS = "{0}{1}".format(ALLOWED_FIRST_CHAR, digits)
|
||||||
|
|
||||||
|
|
||||||
class _Dataset(object):
|
class _Dataset(object):
|
||||||
|
"""Defines a set of data to be used as input for a data driven test.
|
||||||
|
data_dict should be a dictionary with keys matching the keyword
|
||||||
|
arguments defined in test method that consumes the dataset.
|
||||||
|
name should be a string describing the dataset.
|
||||||
|
This class should not be accessed directly. Use or extend DatasetList.
|
||||||
|
"""
|
||||||
def __init__(self, name, data_dict, tags=None):
|
def __init__(self, name, data_dict, tags=None):
|
||||||
"""Defines a set of data to be used as input for a data driven test.
|
|
||||||
data_dict should be a dictionary with keys matching the keyword
|
|
||||||
arguments defined in test method that consumes the dataset.
|
|
||||||
name should be a string describing the dataset.
|
|
||||||
"""
|
|
||||||
|
|
||||||
self.name = name
|
self.name = name
|
||||||
self.data = data_dict
|
self.data = data_dict
|
||||||
self.metadata = {'tags': tags or []}
|
self.metadata = {'tags': tags or []}
|
||||||
@ -58,7 +59,6 @@ class DatasetList(list):
|
|||||||
raise TypeError(
|
raise TypeError(
|
||||||
"extend() argument must be type DatasetList, not {0}".format(
|
"extend() argument must be type DatasetList, not {0}".format(
|
||||||
type(dataset_list)))
|
type(dataset_list)))
|
||||||
|
|
||||||
super(DatasetList, self).extend(dataset_list)
|
super(DatasetList, self).extend(dataset_list)
|
||||||
|
|
||||||
def extend_new_datasets(self, dataset_list):
|
def extend_new_datasets(self, dataset_list):
|
||||||
@ -66,19 +66,17 @@ class DatasetList(list):
|
|||||||
self.extend(dataset_list)
|
self.extend(dataset_list)
|
||||||
|
|
||||||
def apply_test_tags(self, *tags):
|
def apply_test_tags(self, *tags):
|
||||||
|
"""Applys tags to all tests in dataset list"""
|
||||||
for dataset in self:
|
for dataset in self:
|
||||||
dataset.apply_test_tags(tags)
|
dataset.apply_test_tags(tags)
|
||||||
|
|
||||||
def dataset_names(self):
|
def dataset_names(self):
|
||||||
|
"""Gets a list of dataset names from dataset list"""
|
||||||
return [ds.name for ds in self]
|
return [ds.name for ds in self]
|
||||||
|
|
||||||
def dataset_name_map(self):
|
def dataset_name_map(self):
|
||||||
name_map = {}
|
"""Creates a dictionary with key=count and value=dataset name"""
|
||||||
count = 0
|
return {count: ds.name for count, ds in enumerate(self)}
|
||||||
for ds in self:
|
|
||||||
name_map[count] = ds.name
|
|
||||||
count += 1
|
|
||||||
return name_map
|
|
||||||
|
|
||||||
def merge_dataset_tags(self, *dataset_lists):
|
def merge_dataset_tags(self, *dataset_lists):
|
||||||
local_name_map = self.dataset_name_map()
|
local_name_map = self.dataset_name_map()
|
||||||
@ -118,11 +116,14 @@ class DatasetListCombiner(DatasetList):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, *datasets):
|
def __init__(self, *datasets):
|
||||||
for data in product(*datasets):
|
super(DatasetListCombiner, self).__init__()
|
||||||
|
for dataset_list in product(*datasets):
|
||||||
tmp_dic = {}
|
tmp_dic = {}
|
||||||
[tmp_dic.update(d.data) for d in data]
|
names = []
|
||||||
self.append_new_dataset(
|
for dataset in dataset_list:
|
||||||
"_".join([x.name for x in data]), tmp_dic)
|
tmp_dic.update(dataset.data)
|
||||||
|
names.append(dataset.name)
|
||||||
|
self.append_new_dataset("_".join(names), tmp_dic)
|
||||||
|
|
||||||
|
|
||||||
class DatasetGenerator(DatasetList):
|
class DatasetGenerator(DatasetList):
|
||||||
@ -133,6 +134,7 @@ class DatasetGenerator(DatasetList):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, list_of_dicts, base_dataset_name=None):
|
def __init__(self, list_of_dicts, base_dataset_name=None):
|
||||||
|
super(DatasetGenerator, self).__init__()
|
||||||
count = 0
|
count = 0
|
||||||
for kwdict in list_of_dicts:
|
for kwdict in list_of_dicts:
|
||||||
test_name = "{0}_{1}".format(base_dataset_name or "dataset", count)
|
test_name = "{0}_{1}".format(base_dataset_name or "dataset", count)
|
||||||
@ -146,6 +148,7 @@ class TestMultiplier(DatasetList):
|
|||||||
"""
|
"""
|
||||||
|
|
||||||
def __init__(self, num_range):
|
def __init__(self, num_range):
|
||||||
|
super(TestMultiplier, self).__init__()
|
||||||
for num in range(num_range):
|
for num in range(num_range):
|
||||||
name = "{0}".format(num)
|
name = "{0}".format(num)
|
||||||
self.append_new_dataset(name, dict())
|
self.append_new_dataset(name, dict())
|
||||||
@ -161,6 +164,7 @@ class DatasetFileLoader(DatasetList):
|
|||||||
load order, so that not all datasets need to be named.
|
load order, so that not all datasets need to be named.
|
||||||
"""
|
"""
|
||||||
def __init__(self, file_object):
|
def __init__(self, file_object):
|
||||||
|
super(DatasetFileLoader, self).__init__()
|
||||||
content = json.loads(str(file_object.read()))
|
content = json.loads(str(file_object.read()))
|
||||||
count = 0
|
count = 0
|
||||||
for dataset in content:
|
for dataset in content:
|
||||||
|
@ -10,33 +10,31 @@
|
|||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
import inspect
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
from six.moves import zip_longest
|
|
||||||
|
|
||||||
from importlib import import_module
|
from importlib import import_module
|
||||||
from types import FunctionType
|
|
||||||
from unittest import TestCase
|
from unittest import TestCase
|
||||||
from warnings import warn, simplefilter
|
from warnings import warn, simplefilter
|
||||||
|
import inspect
|
||||||
|
import re
|
||||||
|
|
||||||
from cafe.common.reporting import cclogging
|
from cafe.common.reporting import cclogging
|
||||||
|
from cafe.drivers.unittest.datasets import DatasetList
|
||||||
|
|
||||||
TAGS_DECORATOR_TAG_LIST_NAME = "__test_tags__"
|
|
||||||
TAGS_DECORATOR_ATTR_DICT_NAME = "__test_attrs__"
|
|
||||||
DATA_DRIVEN_TEST_ATTR = "__data_driven_test_data__"
|
DATA_DRIVEN_TEST_ATTR = "__data_driven_test_data__"
|
||||||
DATA_DRIVEN_TEST_PREFIX = "ddtest_"
|
DATA_DRIVEN_TEST_PREFIX = "ddtest_"
|
||||||
|
TAGS_DECORATOR_ATTR_DICT_NAME = "__test_attrs__"
|
||||||
|
TAGS_DECORATOR_TAG_LIST_NAME = "__test_tags__"
|
||||||
|
PARALLEL_TAGS_LIST_ATTR = "__parallel_test_tags__"
|
||||||
|
|
||||||
|
|
||||||
class DataDrivenFixtureError(Exception):
|
class DataDrivenFixtureError(Exception):
|
||||||
|
"""Error if you apply DataDrivenClass to class that isn't a TestCase"""
|
||||||
pass
|
pass
|
||||||
|
|
||||||
|
|
||||||
def _add_tags(func, tags):
|
def _add_tags(func, tags, attr):
|
||||||
if not getattr(func, TAGS_DECORATOR_TAG_LIST_NAME, None):
|
if not getattr(func, attr, None):
|
||||||
setattr(func, TAGS_DECORATOR_TAG_LIST_NAME, [])
|
setattr(func, attr, [])
|
||||||
func.__test_tags__ = list(set(func.__test_tags__).union(set(tags)))
|
setattr(func, attr, list(set(getattr(func, attr)).union(set(tags))))
|
||||||
return func
|
return func
|
||||||
|
|
||||||
|
|
||||||
@ -52,8 +50,15 @@ def tags(*tags, **attrs):
|
|||||||
cafe-runner at run time
|
cafe-runner at run time
|
||||||
"""
|
"""
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
func = _add_tags(func, tags)
|
"""Calls _add_tags/_add_attrs to add tags to a func"""
|
||||||
|
func = _add_tags(func, tags, TAGS_DECORATOR_TAG_LIST_NAME)
|
||||||
func = _add_attrs(func, attrs)
|
func = _add_attrs(func, attrs)
|
||||||
|
|
||||||
|
# add tags for parallel runner
|
||||||
|
func = _add_tags(func, tags, PARALLEL_TAGS_LIST_ATTR)
|
||||||
|
func = _add_tags(
|
||||||
|
func, ["{0}={1}".format(k, v) for k, v in attrs.items()],
|
||||||
|
PARALLEL_TAGS_LIST_ATTR)
|
||||||
return func
|
return func
|
||||||
return decorator
|
return decorator
|
||||||
|
|
||||||
@ -61,11 +66,19 @@ def tags(*tags, **attrs):
|
|||||||
def data_driven_test(*dataset_sources, **kwargs):
|
def data_driven_test(*dataset_sources, **kwargs):
|
||||||
"""Used to define the data source for a data driven test in a
|
"""Used to define the data source for a data driven test in a
|
||||||
DataDrivenFixture decorated Unittest TestCase class"""
|
DataDrivenFixture decorated Unittest TestCase class"""
|
||||||
|
|
||||||
def decorator(func):
|
def decorator(func):
|
||||||
# dataset_source checked for backward compatibility
|
"""Combines and stores DatasetLists in __data_driven_test_data__"""
|
||||||
combined_lists = kwargs.get("dataset_source") or []
|
dep_message = "DatasetList object required for data_generator"
|
||||||
|
combined_lists = kwargs.get("dataset_source") or DatasetList()
|
||||||
|
for key, value in kwargs:
|
||||||
|
if key != "dataset_source" and isinstance(value, DatasetList):
|
||||||
|
value.apply_test_tags(key)
|
||||||
|
elif not isinstance(value, DatasetList):
|
||||||
|
warn(dep_message, DeprecationWarning)
|
||||||
|
combined_lists += value
|
||||||
for dataset_list in dataset_sources:
|
for dataset_list in dataset_sources:
|
||||||
|
if not isinstance(dataset_list, DatasetList):
|
||||||
|
warn(dep_message, DeprecationWarning)
|
||||||
combined_lists += dataset_list
|
combined_lists += dataset_list
|
||||||
setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
|
setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
|
||||||
return func
|
return func
|
||||||
@ -75,6 +88,9 @@ def data_driven_test(*dataset_sources, **kwargs):
|
|||||||
def DataDrivenClass(*dataset_lists):
|
def DataDrivenClass(*dataset_lists):
|
||||||
"""Use data driven class decorator. designed to be used on a fixture"""
|
"""Use data driven class decorator. designed to be used on a fixture"""
|
||||||
def decorator(cls):
|
def decorator(cls):
|
||||||
|
"""Creates classes with variables named after datasets.
|
||||||
|
Names of classes are equal to (class_name with out fixture) + ds_name
|
||||||
|
"""
|
||||||
module = import_module(cls.__module__)
|
module = import_module(cls.__module__)
|
||||||
cls = DataDrivenFixture(cls)
|
cls = DataDrivenFixture(cls)
|
||||||
class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)
|
class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)
|
||||||
@ -93,66 +109,52 @@ def DataDrivenClass(*dataset_lists):
|
|||||||
def DataDrivenFixture(cls):
|
def DataDrivenFixture(cls):
|
||||||
"""Generates new unittest test methods from methods defined in the
|
"""Generates new unittest test methods from methods defined in the
|
||||||
decorated class"""
|
decorated class"""
|
||||||
|
def create_func(original_test, new_name, kwargs):
|
||||||
|
"""Creates a function to add to class for ddtests"""
|
||||||
|
def new_test(self):
|
||||||
|
"""Docstring gets replaced by test docstring"""
|
||||||
|
func = getattr(self, original_test.__name__)
|
||||||
|
func(**kwargs)
|
||||||
|
new_test.__name__ = new_name
|
||||||
|
new_test.__doc__ = original_test.__doc__
|
||||||
|
return new_test
|
||||||
|
|
||||||
if not issubclass(cls, TestCase):
|
if not issubclass(cls, TestCase):
|
||||||
raise DataDrivenFixtureError
|
raise DataDrivenFixtureError
|
||||||
|
|
||||||
test_case_attrs = dir(cls)
|
for attr_name in dir(cls):
|
||||||
for attr_name in test_case_attrs:
|
|
||||||
if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:
|
if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:
|
||||||
# Not a data driven test, skip it
|
# Not a data driven test, skip it
|
||||||
continue
|
continue
|
||||||
|
original_test = getattr(cls, attr_name, None)
|
||||||
original_test = getattr(cls, attr_name, None).__func__
|
if not callable(original_test):
|
||||||
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, None)
|
|
||||||
|
|
||||||
if test_data is None:
|
|
||||||
# no data was provided to the datasource decorator or this is not a
|
|
||||||
# data driven test, skip it.
|
|
||||||
continue
|
continue
|
||||||
|
|
||||||
|
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, [])
|
||||||
|
|
||||||
for dataset in test_data:
|
for dataset in test_data:
|
||||||
# Name the new test based on original and dataset names
|
# Name the new test based on original and dataset names
|
||||||
base_test_name = str(original_test.__name__)[
|
base_test_name = attr_name[int(len(DATA_DRIVEN_TEST_PREFIX)):]
|
||||||
int(len(DATA_DRIVEN_TEST_PREFIX)):]
|
new_test_name = "test_{0}_{1}".format(base_test_name, dataset.name)
|
||||||
new_test_name = "test_{0}_{1}".format(
|
|
||||||
base_test_name, dataset.name)
|
|
||||||
|
|
||||||
# Create a new test from the old test
|
new_test = create_func(original_test, new_test_name, dataset.data)
|
||||||
new_test = FunctionType(
|
|
||||||
six.get_function_code(original_test),
|
|
||||||
six.get_function_globals(original_test),
|
|
||||||
name=new_test_name)
|
|
||||||
|
|
||||||
# Copy over any other attributes the original test had (mainly to
|
# Copy over any other attributes the original test had (mainly to
|
||||||
# support test tag decorator)
|
# support test tag decorator)
|
||||||
for attr in list(set(dir(original_test)) - set(dir(new_test))):
|
for key, value in vars(original_test).items():
|
||||||
setattr(new_test, attr, getattr(original_test, attr))
|
if key != DATA_DRIVEN_TEST_ATTR:
|
||||||
|
setattr(new_test, key, value)
|
||||||
# Change the new test's default keyword values to the appropriate
|
|
||||||
# new data as defined by the datasource decorator
|
|
||||||
args, _, _, defaults = inspect.getargspec(original_test)
|
|
||||||
|
|
||||||
# Self doesn't have a default, so we need to remove it
|
|
||||||
args.remove('self')
|
|
||||||
|
|
||||||
# Make sure we take into account required arguments
|
|
||||||
kwargs = dict(
|
|
||||||
zip_longest(
|
|
||||||
args[::-1], list(defaults or ())[::-1], fillvalue=None))
|
|
||||||
|
|
||||||
kwargs.update(dataset.data)
|
|
||||||
|
|
||||||
# Make sure the updated values are in the correct order
|
|
||||||
new_default_values = [kwargs[arg] for arg in args]
|
|
||||||
setattr(new_test, "func_defaults", tuple(new_default_values))
|
|
||||||
|
|
||||||
# Set dataset tags and attrs
|
# Set dataset tags and attrs
|
||||||
new_test = _add_tags(new_test, dataset.metadata.get('tags', []))
|
new_test = _add_tags(
|
||||||
|
new_test, dataset.metadata.get('tags', []),
|
||||||
|
TAGS_DECORATOR_TAG_LIST_NAME)
|
||||||
|
new_test = _add_tags(
|
||||||
|
new_test, dataset.metadata.get('tags', []),
|
||||||
|
PARALLEL_TAGS_LIST_ATTR)
|
||||||
|
|
||||||
# Add the new test to the decorated TestCase
|
# Add the new test to the decorated TestCase
|
||||||
setattr(cls, new_test_name, new_test)
|
setattr(cls, new_test_name, new_test)
|
||||||
|
|
||||||
return cls
|
return cls
|
||||||
|
|
||||||
|
|
||||||
@ -216,6 +218,7 @@ class memoized(object):
|
|||||||
return self.func.__doc__
|
return self.func.__doc__
|
||||||
|
|
||||||
def _start_logging(self, log_file_name):
|
def _start_logging(self, log_file_name):
|
||||||
|
"""Starts logging"""
|
||||||
setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(
|
setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(
|
||||||
log_file_name))
|
log_file_name))
|
||||||
setattr(self.func, '_log', cclogging.getLogger(''))
|
setattr(self.func, '_log', cclogging.getLogger(''))
|
||||||
@ -230,4 +233,5 @@ class memoized(object):
|
|||||||
self.__name__))
|
self.__name__))
|
||||||
|
|
||||||
def _stop_logging(self):
|
def _stop_logging(self):
|
||||||
self.func._log.removeHandler(self.func._log_handler)
|
"""Stop logging"""
|
||||||
|
self.func._log.removeHandler(self.func._log_handler)
|
||||||
|
@ -11,6 +11,11 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
"""
|
||||||
|
@summary: Base Classes for Test Fixtures
|
||||||
|
@note: Corresponds DIRECTLY TO A unittest.TestCase
|
||||||
|
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
|
||||||
|
"""
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import six
|
import six
|
||||||
@ -22,18 +27,17 @@ from cafe.drivers.base import FixtureReporter
|
|||||||
|
|
||||||
class BaseTestFixture(unittest.TestCase):
|
class BaseTestFixture(unittest.TestCase):
|
||||||
"""
|
"""
|
||||||
Base class that all cafe unittest test fixtures should inherit from
|
@summary: This should be used as the base class for any unittest tests,
|
||||||
|
meant to be used instead of unittest.TestCase.
|
||||||
.. seealso:: http://docs.python.org/library/unittest.html#unittest.TestCase
|
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
|
||||||
"""
|
"""
|
||||||
|
|
||||||
__test__ = True
|
__test__ = True
|
||||||
|
|
||||||
def shortDescription(self):
|
def shortDescription(self):
|
||||||
"""
|
"""
|
||||||
Returns a formatted description of the test
|
@summary: Returns a formatted description of the test
|
||||||
"""
|
"""
|
||||||
|
|
||||||
short_desc = None
|
short_desc = None
|
||||||
|
|
||||||
if os.environ.get("VERBOSE", None) == "true" and self._testMethodDoc:
|
if os.environ.get("VERBOSE", None) == "true" and self._testMethodDoc:
|
||||||
@ -42,6 +46,9 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
return short_desc
|
return short_desc
|
||||||
|
|
||||||
def logDescription(self):
|
def logDescription(self):
|
||||||
|
"""
|
||||||
|
@summary: Returns a formatted description from the _testMethodDoc
|
||||||
|
"""
|
||||||
log_desc = None
|
log_desc = None
|
||||||
if self._testMethodDoc:
|
if self._testMethodDoc:
|
||||||
log_desc = "\n{0}".format(
|
log_desc = "\n{0}".format(
|
||||||
@ -51,22 +58,24 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
@classmethod
|
@classmethod
|
||||||
def assertClassSetupFailure(cls, message):
|
def assertClassSetupFailure(cls, message):
|
||||||
"""
|
"""
|
||||||
Use this if you need to fail from a Test Fixture's setUpClass()
|
@summary: Use this if you need to fail from a Test Fixture's
|
||||||
|
setUpClass() method
|
||||||
"""
|
"""
|
||||||
cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
|
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
|
||||||
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def assertClassTeardownFailure(cls, message):
|
def assertClassTeardownFailure(cls, message):
|
||||||
"""
|
"""
|
||||||
Use this if you need to fail from a Test Fixture's tearDownClass()
|
@summary: Use this if you need to fail from a Test Fixture's
|
||||||
|
tearUpClass() method
|
||||||
"""
|
"""
|
||||||
|
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
|
||||||
cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
|
|
||||||
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def setUpClass(cls):
|
def setUpClass(cls):
|
||||||
|
"""@summary: Adds logging/reporting to Unittest setUpClass"""
|
||||||
super(BaseTestFixture, cls).setUpClass()
|
super(BaseTestFixture, cls).setUpClass()
|
||||||
cls._reporter = FixtureReporter(cls)
|
cls._reporter = FixtureReporter(cls)
|
||||||
cls.fixture_log = cls._reporter.logger.log
|
cls.fixture_log = cls._reporter.logger.log
|
||||||
@ -75,13 +84,14 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def tearDownClass(cls):
|
def tearDownClass(cls):
|
||||||
|
"""@summary: Adds stop reporting to Unittest setUpClass"""
|
||||||
cls._reporter.stop()
|
cls._reporter.stop()
|
||||||
|
|
||||||
# Call super teardown after to avoid tearing down the class before we
|
# Call super teardown after to avoid tearing down the class before we
|
||||||
# can run our own tear down stuff.
|
# can run our own tear down stuff.
|
||||||
super(BaseTestFixture, cls).tearDownClass()
|
super(BaseTestFixture, cls).tearDownClass()
|
||||||
|
|
||||||
def setUp(self):
|
def setUp(self):
|
||||||
|
"""@summary: Logs test metrics"""
|
||||||
self.shortDescription()
|
self.shortDescription()
|
||||||
self._reporter.start_test_metrics(
|
self._reporter.start_test_metrics(
|
||||||
self.__class__.__name__, self._testMethodName,
|
self.__class__.__name__, self._testMethodName,
|
||||||
@ -94,7 +104,6 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
better pattern or working with the result object directly.
|
better pattern or working with the result object directly.
|
||||||
This is related to the todo in L{TestRunMetrics}
|
This is related to the todo in L{TestRunMetrics}
|
||||||
"""
|
"""
|
||||||
|
|
||||||
if sys.version_info < (3, 4):
|
if sys.version_info < (3, 4):
|
||||||
if six.PY2:
|
if six.PY2:
|
||||||
report = self._resultForDoCleanups
|
report = self._resultForDoCleanups
|
||||||
@ -114,7 +123,7 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
self._reporter.stop_test_metrics(self._testMethodName,
|
self._reporter.stop_test_metrics(self._testMethodName,
|
||||||
'Passed')
|
'Passed')
|
||||||
else:
|
else:
|
||||||
for method, errors in self._outcome.errors:
|
for method, _ in self._outcome.errors:
|
||||||
if self._test_name_matches_result(self._testMethodName,
|
if self._test_name_matches_result(self._testMethodName,
|
||||||
method):
|
method):
|
||||||
self._reporter.stop_test_metrics(self._testMethodName,
|
self._reporter.stop_test_metrics(self._testMethodName,
|
||||||
@ -125,11 +134,9 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
# Continue inherited tearDown()
|
# Continue inherited tearDown()
|
||||||
super(BaseTestFixture, self).tearDown()
|
super(BaseTestFixture, self).tearDown()
|
||||||
|
|
||||||
def _test_name_matches_result(self, name, test_result):
|
@staticmethod
|
||||||
"""
|
def _test_name_matches_result(name, test_result):
|
||||||
Checks if a test result matches a specific test name.
|
"""@summary: Checks if a test result matches a specific test name."""
|
||||||
"""
|
|
||||||
|
|
||||||
if sys.version_info < (3, 4):
|
if sys.version_info < (3, 4):
|
||||||
# Try to get the result portion of the tuple
|
# Try to get the result portion of the tuple
|
||||||
try:
|
try:
|
||||||
@ -147,17 +154,14 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def _do_class_cleanup_tasks(cls):
|
def _do_class_cleanup_tasks(cls):
|
||||||
"""
|
"""@summary: Runs class cleanup tasks added during testing"""
|
||||||
Runs the tasks designated by the use of addClassCleanup
|
|
||||||
"""
|
|
||||||
|
|
||||||
for func, args, kwargs in reversed(cls._class_cleanup_tasks):
|
for func, args, kwargs in reversed(cls._class_cleanup_tasks):
|
||||||
cls.fixture_log.debug(
|
cls.fixture_log.debug(
|
||||||
"Running class cleanup task: {0}({1}, {2})".format(
|
"Running class cleanup task: %s(%s, %s)",
|
||||||
func.__name__,
|
func.__name__,
|
||||||
", ".join([str(arg) for arg in args]),
|
", ".join([str(arg) for arg in args]),
|
||||||
", ".join(["{0}={1}".format(
|
", ".join(["{0}={1}".format(
|
||||||
str(k), str(kwargs[k])) for k in kwargs])))
|
str(k), str(kwargs[k])) for k in kwargs]))
|
||||||
try:
|
try:
|
||||||
func(*args, **kwargs)
|
func(*args, **kwargs)
|
||||||
except Exception as exception:
|
except Exception as exception:
|
||||||
@ -166,17 +170,15 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
cls.fixture_log.exception(exception)
|
cls.fixture_log.exception(exception)
|
||||||
cls.fixture_log.error(
|
cls.fixture_log.error(
|
||||||
"classTearDown failure: Exception occured while trying to"
|
"classTearDown failure: Exception occured while trying to"
|
||||||
" execute class teardown task: {0}({1}, {2})".format(
|
" execute class teardown task: %s(%s, %s)",
|
||||||
func.__name__,
|
func.__name__,
|
||||||
", ".join([str(arg) for arg in args]),
|
", ".join([str(arg) for arg in args]),
|
||||||
", ".join(["{0}={1}".format(
|
", ".join(["{0}={1}".format(
|
||||||
str(k), str(kwargs[k])) for k in kwargs])))
|
str(k), str(kwargs[k])) for k in kwargs]))
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def addClassCleanup(cls, function, *args, **kwargs):
|
def addClassCleanup(cls, function, *args, **kwargs):
|
||||||
"""
|
"""@summary: Named to match unittest's addCleanup.
|
||||||
Provides an addCleanup-like method that can be used in classmethods
|
|
||||||
|
|
||||||
ClassCleanup tasks run if setUpClass fails, or after tearDownClass.
|
ClassCleanup tasks run if setUpClass fails, or after tearDownClass.
|
||||||
(They don't depend on tearDownClass running)
|
(They don't depend on tearDownClass running)
|
||||||
"""
|
"""
|
||||||
@ -186,15 +188,16 @@ class BaseTestFixture(unittest.TestCase):
|
|||||||
|
|
||||||
class BaseBurnInTestFixture(BaseTestFixture):
|
class BaseBurnInTestFixture(BaseTestFixture):
|
||||||
"""
|
"""
|
||||||
Base test fixture that allows for Burn-In tests
|
@summary: Base test fixture that allows for Burn-In tests
|
||||||
"""
|
"""
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def setUpClass(cls):
|
def setUpClass(cls):
|
||||||
|
"""@summary: inits burning testing variables"""
|
||||||
super(BaseBurnInTestFixture, cls).setUpClass()
|
super(BaseBurnInTestFixture, cls).setUpClass()
|
||||||
cls.test_list = []
|
cls.test_list = []
|
||||||
cls.iterations = 0
|
cls.iterations = 0
|
||||||
|
|
||||||
@classmethod
|
@classmethod
|
||||||
def addTest(cls, test_case):
|
def addTest(cls, test_case):
|
||||||
|
"""@summary: Adds a test case"""
|
||||||
cls.test_list.append(test_case)
|
cls.test_list.append(test_case)
|
||||||
|
@ -11,101 +11,78 @@
|
|||||||
# License for the specific language governing permissions and limitations
|
# License for the specific language governing permissions and limitations
|
||||||
# under the License.
|
# under the License.
|
||||||
|
|
||||||
|
from unittest.suite import _ErrorHolder
|
||||||
|
import json
|
||||||
|
|
||||||
|
|
||||||
class SummarizeResults(object):
|
class SummarizeResults(object):
|
||||||
|
"""Reads in vars dict from suite and builds a Summarized results obj"""
|
||||||
def __init__(self, result_dict, master_testsuite,
|
def __init__(self, result_dict, tests, execution_time):
|
||||||
execution_time):
|
|
||||||
for keys, values in list(result_dict.items()):
|
|
||||||
setattr(self, keys, values)
|
|
||||||
self.master_testsuite = master_testsuite
|
|
||||||
self.execution_time = execution_time
|
self.execution_time = execution_time
|
||||||
|
self.all_tests = tests
|
||||||
|
self.failures = result_dict.get("failures", [])
|
||||||
|
self.skipped = result_dict.get("skipped", [])
|
||||||
|
self.errors = result_dict.get("errors", [])
|
||||||
|
self.tests_run = result_dict.get("testsRun", 0)
|
||||||
|
|
||||||
def get_passed_tests(self):
|
def get_passed_tests(self):
|
||||||
all_tests = []
|
"""Gets a list of results objects for passed tests"""
|
||||||
failed_tests = []
|
errored_tests = [
|
||||||
skipped_tests = []
|
t[0] for t in self.errors if not isinstance(t[0], _ErrorHolder)]
|
||||||
errored_tests = []
|
setup_errored_classes = [
|
||||||
setup_errored_classes = []
|
str(t[0]).split(".")[-1].rstrip(')')
|
||||||
setup_errored_tests = []
|
for t in self.errors if isinstance(t[0], _ErrorHolder)]
|
||||||
passed_obj_list = []
|
setup_errored_tests = [
|
||||||
for test in vars(self.master_testsuite).get('_tests'):
|
t for t in self.all_tests
|
||||||
all_tests.append(test)
|
if t.__class__.__name__ in setup_errored_classes]
|
||||||
for failed_test in self.failures:
|
|
||||||
failed_tests.append(failed_test[0])
|
|
||||||
for skipped_test in self.skipped:
|
|
||||||
skipped_tests.append(skipped_test[0])
|
|
||||||
for errored_test in self.errors:
|
|
||||||
if errored_test[0].__class__.__name__ != '_ErrorHolder':
|
|
||||||
errored_tests.append(errored_test[0])
|
|
||||||
else:
|
|
||||||
setup_errored_classes.append(
|
|
||||||
str(errored_test[0]).split(".")[-1].rstrip(')'))
|
|
||||||
if len(setup_errored_classes) != 0:
|
|
||||||
for item_1 in all_tests:
|
|
||||||
for item_2 in setup_errored_classes:
|
|
||||||
if item_2 == item_1.__class__.__name__:
|
|
||||||
setup_errored_tests.append(item_1)
|
|
||||||
|
|
||||||
passed_tests = list(set(all_tests) - set(failed_tests) -
|
passed_tests = list(
|
||||||
set(skipped_tests) - set(errored_tests) -
|
set(self.all_tests) -
|
||||||
set(setup_errored_tests))
|
set([test[0] for test in self.failures]) -
|
||||||
|
set([test[0] for test in self.skipped]) -
|
||||||
|
set(errored_tests) - set(setup_errored_tests))
|
||||||
|
|
||||||
for passed_test in passed_tests:
|
return [self._create_result(t) for t in passed_tests]
|
||||||
passed_obj = Result(passed_test.__class__.__name__,
|
|
||||||
vars(passed_test).get('_testMethodName'))
|
|
||||||
passed_obj_list.append(passed_obj)
|
|
||||||
|
|
||||||
return passed_obj_list
|
|
||||||
|
|
||||||
def get_skipped_tests(self):
|
|
||||||
skipped_obj_list = []
|
|
||||||
for item in self.skipped:
|
|
||||||
skipped_obj = Result(item[0].__class__.__name__,
|
|
||||||
vars(item[0]).get('_testMethodName'),
|
|
||||||
skipped_msg=item[1])
|
|
||||||
skipped_obj_list.append(skipped_obj)
|
|
||||||
return skipped_obj_list
|
|
||||||
|
|
||||||
def get_errored_tests(self):
|
|
||||||
errored_obj_list = []
|
|
||||||
for item in self.errors:
|
|
||||||
if item[0].__class__.__name__ is not '_ErrorHolder':
|
|
||||||
errored_obj = Result(item[0].__class__.__name__,
|
|
||||||
vars(item[0]).get('_testMethodName'),
|
|
||||||
error_trace=item[1])
|
|
||||||
else:
|
|
||||||
errored_obj = Result(str(item[0]).split(" ")[0],
|
|
||||||
str(item[0]).split(".")[-1].rstrip(')'),
|
|
||||||
error_trace=item[1])
|
|
||||||
errored_obj_list.append(errored_obj)
|
|
||||||
return errored_obj_list
|
|
||||||
|
|
||||||
def parse_failures(self):
|
|
||||||
failure_obj_list = []
|
|
||||||
for failure in self.failures:
|
|
||||||
failure_obj = Result(failure[0].__class__.__name__,
|
|
||||||
vars(failure[0]).get('_testMethodName'),
|
|
||||||
failure[1])
|
|
||||||
failure_obj_list.append(failure_obj)
|
|
||||||
|
|
||||||
return failure_obj_list
|
|
||||||
|
|
||||||
def summary_result(self):
|
def summary_result(self):
|
||||||
summary_res = {'tests': str(self.testsRun),
|
"""Returns a dictionary containing counts of tests and statuses"""
|
||||||
'errors': str(len(self.errors)),
|
return {
|
||||||
'failures': str(len(self.failures)),
|
'tests': self.tests_run,
|
||||||
'skipped': str(len(self.skipped))}
|
'errors': len(self.errors),
|
||||||
return summary_res
|
'failures': len(self.failures),
|
||||||
|
'skipped': len(self.skipped)}
|
||||||
|
|
||||||
def gather_results(self):
|
def gather_results(self):
|
||||||
executed_tests = (self.get_passed_tests() + self.parse_failures() +
|
"""Gets a result obj for all tests ran and failed setup classes"""
|
||||||
self.get_errored_tests() + self.get_skipped_tests())
|
return (
|
||||||
|
self.get_passed_tests() +
|
||||||
|
[self._create_result(t, "failures") for t in self.failures] +
|
||||||
|
[self._create_result(t, "errored") for t in self.errors] +
|
||||||
|
[self._create_result(t, "skipped") for t in self.skipped])
|
||||||
|
|
||||||
return executed_tests
|
@staticmethod
|
||||||
|
def _create_result(test, type_="passed"):
|
||||||
|
"""Creates a Result object from a test and type of test"""
|
||||||
|
msg_type = {"failures": "failure_trace", "skipped": "skipped_msg",
|
||||||
|
"errored": "error_trace"}
|
||||||
|
if type_ == "passed":
|
||||||
|
dic = {"test_method_name": getattr(test, '_testMethodName', ""),
|
||||||
|
"test_class_name": test.__class__.__name__}
|
||||||
|
|
||||||
|
elif (type_ in ["failures", "skipped", "errored"] and
|
||||||
|
not isinstance(test[0], _ErrorHolder)):
|
||||||
|
dic = {"test_method_name": getattr(test[0], '_testMethodName', ""),
|
||||||
|
"test_class_name": test[0].__class__.__name__,
|
||||||
|
msg_type.get(type_, "error_trace"): test[1]}
|
||||||
|
else:
|
||||||
|
dic = {"test_method_name": str(test[0]).split(" ")[0],
|
||||||
|
"test_class_name": str(test[0]).split(".")[-1].rstrip(')'),
|
||||||
|
msg_type.get(type_, "error_trace"): test[1]}
|
||||||
|
return Result(**dic)
|
||||||
|
|
||||||
|
|
||||||
class Result(object):
|
class Result(object):
|
||||||
|
"""Result object used to create the json and xml results"""
|
||||||
def __init__(
|
def __init__(
|
||||||
self, test_class_name, test_method_name, failure_trace=None,
|
self, test_class_name, test_method_name, failure_trace=None,
|
||||||
skipped_msg=None, error_trace=None):
|
skipped_msg=None, error_trace=None):
|
||||||
@ -117,7 +94,4 @@ class Result(object):
|
|||||||
self.error_trace = error_trace
|
self.error_trace = error_trace
|
||||||
|
|
||||||
def __repr__(self):
|
def __repr__(self):
|
||||||
values = []
|
return json.dumps(self.__dict__)
|
||||||
for prop in self.__dict__:
|
|
||||||
values.append("%s: %s" % (prop, self.__dict__[prop]))
|
|
||||||
return dict('{' + ', '.join(values) + '}')
|
|
||||||
|
@ -16,23 +16,23 @@ from __future__ import print_function
|
|||||||
from inspect import isclass, ismethod
|
from inspect import isclass, ismethod
|
||||||
import importlib
|
import importlib
|
||||||
import pkgutil
|
import pkgutil
|
||||||
import re
|
|
||||||
import unittest
|
import unittest
|
||||||
|
import uuid
|
||||||
|
|
||||||
from cafe.drivers.unittest.common import print_exception, get_error
|
from cafe.drivers.base import print_exception, get_error
|
||||||
from cafe.drivers.unittest.suite import OpenCafeUnittestTestSuite
|
from cafe.drivers.unittest.suite import OpenCafeUnittestTestSuite
|
||||||
from cafe.drivers.unittest.decorators import TAGS_LIST_ATTR
|
from cafe.drivers.unittest.decorators import PARALLEL_TAGS_LIST_ATTR
|
||||||
|
|
||||||
|
|
||||||
class SuiteBuilder(object):
|
class SuiteBuilder(object):
|
||||||
"""Builds suites for OpenCafe Unittest Runner"""
|
"""Builds suites for OpenCafe Unittest Runner"""
|
||||||
def __init__(
|
def __init__(
|
||||||
self, testrepos, tags=None, all_tags=False, dotpath_regex=None,
|
self, testrepos, tags=None, all_tags=False, regex_list=None,
|
||||||
file_=None, dry_run=False, exit_on_error=False):
|
file_=None, dry_run=False, exit_on_error=False):
|
||||||
self.testrepos = testrepos
|
self.testrepos = testrepos
|
||||||
self.tags = tags or []
|
self.tags = tags or []
|
||||||
self.all_tags = all_tags
|
self.all_tags = all_tags
|
||||||
self.regex_list = dotpath_regex or []
|
self.regex_list = regex_list or []
|
||||||
self.exit_on_error = exit_on_error
|
self.exit_on_error = exit_on_error
|
||||||
self.dry_run = dry_run
|
self.dry_run = dry_run
|
||||||
# dict format {"ubroast.test.test1.TestClass": ["test_t1", "test_t2"]}
|
# dict format {"ubroast.test.test1.TestClass": ["test_t1", "test_t2"]}
|
||||||
@ -52,6 +52,8 @@ class SuiteBuilder(object):
|
|||||||
for test in suite:
|
for test in suite:
|
||||||
print(test)
|
print(test)
|
||||||
exit(0)
|
exit(0)
|
||||||
|
for suite in test_suites:
|
||||||
|
suite.cafe_uuid = uuid.uuid4()
|
||||||
return test_suites
|
return test_suites
|
||||||
|
|
||||||
def load_file(self):
|
def load_file(self):
|
||||||
@ -99,10 +101,6 @@ class SuiteBuilder(object):
|
|||||||
obj = getattr(loaded_module, objname, None)
|
obj = getattr(loaded_module, objname, None)
|
||||||
if (isclass(obj) and issubclass(obj, unittest.TestCase) and
|
if (isclass(obj) and issubclass(obj, unittest.TestCase) and
|
||||||
"fixture" not in obj.__name__.lower()):
|
"fixture" not in obj.__name__.lower()):
|
||||||
if getattr(obj, "__test__", None) is not None:
|
|
||||||
print("Feature __test__ deprecated: Not skipping:"
|
|
||||||
"{0}".format(obj.__name__))
|
|
||||||
print("Use unittest.skip(reason)")
|
|
||||||
classes.append(obj)
|
classes.append(obj)
|
||||||
return classes
|
return classes
|
||||||
|
|
||||||
@ -122,7 +120,7 @@ class SuiteBuilder(object):
|
|||||||
ret_val = ismethod(test) and self._check_tags(test)
|
ret_val = ismethod(test) and self._check_tags(test)
|
||||||
regex_val = not self.regex_list
|
regex_val = not self.regex_list
|
||||||
for regex in self.regex_list:
|
for regex in self.regex_list:
|
||||||
regex_val |= bool(re.search(regex, full_path))
|
regex_val |= bool(regex.search(full_path))
|
||||||
return ret_val & regex_val
|
return ret_val & regex_val
|
||||||
|
|
||||||
def _check_tags(self, test):
|
def _check_tags(self, test):
|
||||||
@ -133,7 +131,7 @@ class SuiteBuilder(object):
|
|||||||
foo and bar will be matched including a test that contains
|
foo and bar will be matched including a test that contains
|
||||||
(foo, bar, bazz)
|
(foo, bar, bazz)
|
||||||
"""
|
"""
|
||||||
test_tags = getattr(test, TAGS_LIST_ATTR, [])
|
test_tags = getattr(test, PARALLEL_TAGS_LIST_ATTR, [])
|
||||||
if self.all_tags:
|
if self.all_tags:
|
||||||
return all([tag in test_tags for tag in self.tags])
|
return all([tag in test_tags for tag in self.tags])
|
||||||
else:
|
else:
|
@ -1,40 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from __future__ import print_function
|
|
||||||
import sys
|
|
||||||
from traceback import print_exc
|
|
||||||
|
|
||||||
|
|
||||||
def print_exception(file_=None, method=None, value=None, exception=None):
|
|
||||||
"""
|
|
||||||
Prints exceptions in a standard format to stderr.
|
|
||||||
"""
|
|
||||||
print("{0}".format("=" * 70), file=sys.stderr)
|
|
||||||
if file_:
|
|
||||||
print("{0}:".format(file_), file=sys.stderr, end=" ")
|
|
||||||
if method:
|
|
||||||
print("{0}:".format(method), file=sys.stderr, end=" ")
|
|
||||||
if value:
|
|
||||||
print("{0}:".format(value), file=sys.stderr, end=" ")
|
|
||||||
if exception:
|
|
||||||
print("{0}:".format(exception), file=sys.stderr, end=" ")
|
|
||||||
print("\n{0}".format("-" * 70), file=sys.stderr)
|
|
||||||
if exception is not None:
|
|
||||||
print_exc(file=sys.stderr)
|
|
||||||
print(file=sys.stderr)
|
|
||||||
|
|
||||||
|
|
||||||
def get_error(exception=None):
|
|
||||||
"""Gets errno from exception or returns one"""
|
|
||||||
return getattr(exception, "errno", 1)
|
|
@ -1,164 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from itertools import product
|
|
||||||
from string import ascii_letters, digits
|
|
||||||
import json
|
|
||||||
|
|
||||||
ALLOWED_FIRST_CHAR = "_{0}".format(ascii_letters)
|
|
||||||
ALLOWED_OTHER_CHARS = "{0}{1}".format(ALLOWED_FIRST_CHAR, digits)
|
|
||||||
|
|
||||||
|
|
||||||
class _Dataset(object):
|
|
||||||
"""Defines a set of data to be used as input for a data driven test.
|
|
||||||
data_dict should be a dictionary with keys matching the keyword
|
|
||||||
arguments defined in test method that consumes the dataset.
|
|
||||||
name should be a string describing the dataset.
|
|
||||||
This class should not be accessed directly. Use or extend DatasetList.
|
|
||||||
"""
|
|
||||||
def __init__(self, name, data_dict, tags=None):
|
|
||||||
self.name = name
|
|
||||||
self.data = data_dict
|
|
||||||
self.metadata = {'tags': tags or []}
|
|
||||||
|
|
||||||
def apply_test_tags(self, tags):
|
|
||||||
"""Applys tags to dataset"""
|
|
||||||
self.metadata['tags'] = list(set(self.metadata.get('tags') + tags))
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
return "<name:{0}, data:{1}>".format(self.name, self.data)
|
|
||||||
|
|
||||||
|
|
||||||
class DatasetList(list):
|
|
||||||
"""Specialized list-like object that holds Dataset objects"""
|
|
||||||
|
|
||||||
def append(self, dataset):
|
|
||||||
if not isinstance(dataset, _Dataset):
|
|
||||||
raise TypeError(
|
|
||||||
"append() argument must be type Dataset, not {0}".format(
|
|
||||||
type(dataset)))
|
|
||||||
|
|
||||||
super(DatasetList, self).append(dataset)
|
|
||||||
|
|
||||||
def append_new_dataset(self, name, data_dict, tags=None):
|
|
||||||
"""Creates and appends a new Dataset"""
|
|
||||||
self.append(_Dataset(name, data_dict, tags))
|
|
||||||
|
|
||||||
def extend(self, dataset_list):
|
|
||||||
if not isinstance(dataset_list, DatasetList):
|
|
||||||
raise TypeError(
|
|
||||||
"extend() argument must be type DatasetList, not {0}".format(
|
|
||||||
type(dataset_list)))
|
|
||||||
super(DatasetList, self).extend(dataset_list)
|
|
||||||
|
|
||||||
def extend_new_datasets(self, dataset_list):
|
|
||||||
"""Creates and extends a new DatasetList"""
|
|
||||||
self.extend(dataset_list)
|
|
||||||
|
|
||||||
def apply_test_tags(self, *tags):
|
|
||||||
"""Applys tags to all tests in dataset list"""
|
|
||||||
for dataset in self:
|
|
||||||
dataset.apply_test_tags(tags)
|
|
||||||
|
|
||||||
def dataset_names(self):
|
|
||||||
"""Gets a list of dataset names from dataset list"""
|
|
||||||
return [ds.name for ds in self]
|
|
||||||
|
|
||||||
def dataset_name_map(self):
|
|
||||||
"""Creates a dictionary with key=count and value=dataset name"""
|
|
||||||
return {count: ds.name for count, ds in enumerate(self)}
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
def replace_invalid_characters(string, new_char="_"):
|
|
||||||
"""This functions corrects string so the following is true
|
|
||||||
Identifiers (also referred to as names) are described by the
|
|
||||||
following lexical definitions:
|
|
||||||
identifier ::= (letter|"_") (letter | digit | "_")*
|
|
||||||
letter ::= lowercase | uppercase
|
|
||||||
lowercase ::= "a"..."z"
|
|
||||||
uppercase ::= "A"..."Z"
|
|
||||||
digit ::= "0"..."9"
|
|
||||||
"""
|
|
||||||
if not string:
|
|
||||||
return string
|
|
||||||
for char in set(string) - set(ALLOWED_OTHER_CHARS):
|
|
||||||
string = string.replace(char, new_char)
|
|
||||||
if string[0] in digits:
|
|
||||||
string = "{0}{1}".format(new_char, string[1:])
|
|
||||||
return string
|
|
||||||
|
|
||||||
|
|
||||||
class DatasetListCombiner(DatasetList):
|
|
||||||
"""Class that can be used to combine multiple DatasetList objects together.
|
|
||||||
Produces the product of combining every dataset from each list together
|
|
||||||
with the names merged together. The data is overridden in a cascading
|
|
||||||
fashion, similar to CSS, where the last dataset takes priority.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, *datasets):
|
|
||||||
super(DatasetListCombiner, self).__init__()
|
|
||||||
for dataset_list in product(*datasets):
|
|
||||||
tmp_dic = {}
|
|
||||||
names = []
|
|
||||||
for dataset in dataset_list:
|
|
||||||
tmp_dic.update(dataset.data)
|
|
||||||
names.append(dataset.name)
|
|
||||||
self.append_new_dataset("_".join(names), tmp_dic)
|
|
||||||
|
|
||||||
|
|
||||||
class DatasetGenerator(DatasetList):
|
|
||||||
"""Generates Datasets from a list of dictionaries, which are named
|
|
||||||
numericaly according to the source dictionary's order in the source list.
|
|
||||||
If a base_dataset_name is provided, that is used as the base name postfix
|
|
||||||
for all tests before they are numbered.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, list_of_dicts, base_dataset_name=None):
|
|
||||||
super(DatasetGenerator, self).__init__()
|
|
||||||
count = 0
|
|
||||||
for kwdict in list_of_dicts:
|
|
||||||
test_name = "{0}_{1}".format(base_dataset_name or "dataset", count)
|
|
||||||
self.append_new_dataset(test_name, kwdict)
|
|
||||||
count += 1
|
|
||||||
|
|
||||||
|
|
||||||
class TestMultiplier(DatasetList):
|
|
||||||
"""Creates num_range number of copies of the source test,
|
|
||||||
and names the new tests numerically. Does not generate Datasets.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, num_range):
|
|
||||||
super(TestMultiplier, self).__init__()
|
|
||||||
for num in range(num_range):
|
|
||||||
name = "{0}".format(num)
|
|
||||||
self.append_new_dataset(name, dict())
|
|
||||||
|
|
||||||
|
|
||||||
class DatasetFileLoader(DatasetList):
|
|
||||||
"""Reads a file object's contents in as json and converts them to
|
|
||||||
lists of Dataset objects.
|
|
||||||
Files should be opened in 'rb' (read binady) mode.
|
|
||||||
File should be a list of dictionaries following this format:
|
|
||||||
[{'name':"dataset_name", 'data':{key:value, key:value, ...}},]
|
|
||||||
if name is ommited, it is replaced with that dataset's location in the
|
|
||||||
load order, so that not all datasets need to be named.
|
|
||||||
"""
|
|
||||||
def __init__(self, file_object):
|
|
||||||
super(DatasetFileLoader, self).__init__()
|
|
||||||
content = json.loads(str(file_object.read()))
|
|
||||||
count = 0
|
|
||||||
for dataset in content:
|
|
||||||
name = dataset.get('name', str(count))
|
|
||||||
data = dataset.get('data', dict())
|
|
||||||
self.append_new_dataset(name, data)
|
|
||||||
count += 1
|
|
@ -1,206 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
import inspect
|
|
||||||
import re
|
|
||||||
|
|
||||||
from importlib import import_module
|
|
||||||
from unittest import TestCase
|
|
||||||
|
|
||||||
from cafe.common.reporting import cclogging
|
|
||||||
from cafe.drivers.unittest.datasets import DatasetList
|
|
||||||
|
|
||||||
TAGS_LIST_ATTR = "__test_tags__"
|
|
||||||
DATA_DRIVEN_TEST_ATTR = "__data_driven_test_data__"
|
|
||||||
DATA_DRIVEN_TEST_PREFIX = "ddtest_"
|
|
||||||
|
|
||||||
|
|
||||||
class DataDrivenFixtureError(Exception):
|
|
||||||
"""Error if you apply DataDrivenClass to func that isn't a TestCase"""
|
|
||||||
pass
|
|
||||||
|
|
||||||
|
|
||||||
def _add_tags(func, tag_list):
|
|
||||||
"""Adds tages to a function, stored in __test_tags__ variable"""
|
|
||||||
func.__test_tags__ = list(set(
|
|
||||||
getattr(func, TAGS_LIST_ATTR, []) + tag_list))
|
|
||||||
return func
|
|
||||||
|
|
||||||
|
|
||||||
def tags(*tag_list, **attrs):
|
|
||||||
"""Adds tags and attributes to tests, which are interpreted by the
|
|
||||||
cafe-runner at run time
|
|
||||||
"""
|
|
||||||
def decorator(func):
|
|
||||||
"""Calls _add_tags to add tags to a function"""
|
|
||||||
func = _add_tags(func, list(tag_list))
|
|
||||||
func = _add_tags(func, [
|
|
||||||
"{0}={1}".format(k, v) for k, v in attrs.items()])
|
|
||||||
return func
|
|
||||||
return decorator
|
|
||||||
|
|
||||||
|
|
||||||
def data_driven_test(*dataset_sources, **kwargs):
|
|
||||||
"""Used to define the data source for a data driven test in a
|
|
||||||
DataDrivenFixture decorated Unittest TestCase class"""
|
|
||||||
|
|
||||||
def decorator(func):
|
|
||||||
"""Combines and stores DatasetLists in __data_driven_test_data__"""
|
|
||||||
combined_lists = DatasetList()
|
|
||||||
for key, value in kwargs:
|
|
||||||
if isinstance(value, DatasetList):
|
|
||||||
value.apply_test_tags(key)
|
|
||||||
else:
|
|
||||||
print "DeprecationWarning Warning: non DataSetList passed to",
|
|
||||||
print " data generator."
|
|
||||||
combined_lists += value
|
|
||||||
for dataset_list in dataset_sources:
|
|
||||||
combined_lists += dataset_list
|
|
||||||
setattr(func, DATA_DRIVEN_TEST_ATTR, combined_lists)
|
|
||||||
return func
|
|
||||||
return decorator
|
|
||||||
|
|
||||||
|
|
||||||
def DataDrivenClass(*dataset_lists):
|
|
||||||
"""Use data driven class decorator. designed to be used on a fixture"""
|
|
||||||
def decorator(cls):
|
|
||||||
"""Creates classes with variables named after datasets.
|
|
||||||
Names of classes are equal to (class_name with out fixture) + ds_name
|
|
||||||
"""
|
|
||||||
module = import_module(cls.__module__)
|
|
||||||
cls = DataDrivenFixture(cls)
|
|
||||||
class_name = re.sub("fixture", "", cls.__name__, flags=re.IGNORECASE)
|
|
||||||
if not re.match(".*fixture", cls.__name__, flags=re.IGNORECASE):
|
|
||||||
cls.__name__ = "{0}Fixture".format(cls.__name__)
|
|
||||||
for dataset_list in dataset_lists:
|
|
||||||
for dataset in dataset_list:
|
|
||||||
class_name_new = "{0}_{1}".format(class_name, dataset.name)
|
|
||||||
class_name_new = DatasetList.replace_invalid_characters(
|
|
||||||
class_name_new)
|
|
||||||
new_class = type(class_name_new, (cls,), dataset.data)
|
|
||||||
new_class.__module__ = cls.__module__
|
|
||||||
setattr(module, class_name_new, new_class)
|
|
||||||
return cls
|
|
||||||
return decorator
|
|
||||||
|
|
||||||
|
|
||||||
def DataDrivenFixture(cls):
|
|
||||||
"""Generates new unittest test methods from methods defined in the
|
|
||||||
decorated class"""
|
|
||||||
def create_func(original_test, new_name, kwargs):
|
|
||||||
"""Creates a function to add to class for ddtests"""
|
|
||||||
def new_test(self):
|
|
||||||
"""Docstring gets replaced by test docstring"""
|
|
||||||
func = getattr(self, original_test.__name__)
|
|
||||||
func(**kwargs)
|
|
||||||
new_test.__name__ = new_name
|
|
||||||
new_test.__doc__ = original_test.__doc__
|
|
||||||
return new_test
|
|
||||||
|
|
||||||
if not issubclass(cls, TestCase):
|
|
||||||
raise DataDrivenFixtureError
|
|
||||||
|
|
||||||
for attr_name in dir(cls):
|
|
||||||
if attr_name.startswith(DATA_DRIVEN_TEST_PREFIX) is False:
|
|
||||||
# Not a data driven test, skip it
|
|
||||||
continue
|
|
||||||
original_test = getattr(cls, attr_name, None)
|
|
||||||
if not callable(original_test):
|
|
||||||
continue
|
|
||||||
|
|
||||||
test_data = getattr(original_test, DATA_DRIVEN_TEST_ATTR, [])
|
|
||||||
|
|
||||||
for dataset in test_data:
|
|
||||||
# Name the new test based on original and dataset names
|
|
||||||
base_test_name = attr_name[int(len(DATA_DRIVEN_TEST_PREFIX)):]
|
|
||||||
new_test_name = DatasetList.replace_invalid_characters(
|
|
||||||
"test_{0}_{1}".format(base_test_name, dataset.name))
|
|
||||||
|
|
||||||
new_test = create_func(original_test, new_test_name, dataset.data)
|
|
||||||
|
|
||||||
# Copy over any other attributes the original test had (mainly to
|
|
||||||
# support test tag decorator)
|
|
||||||
for key, value in vars(original_test).items():
|
|
||||||
if key != DATA_DRIVEN_TEST_ATTR:
|
|
||||||
setattr(new_test, key, value)
|
|
||||||
|
|
||||||
# Set dataset tags and attrs
|
|
||||||
new_test = _add_tags(new_test, dataset.metadata.get('tags', []))
|
|
||||||
|
|
||||||
# Add the new test to the decorated TestCase
|
|
||||||
setattr(cls, new_test_name, new_test)
|
|
||||||
return cls
|
|
||||||
|
|
||||||
|
|
||||||
class memoized(object):
|
|
||||||
|
|
||||||
"""
|
|
||||||
Decorator.
|
|
||||||
@see: https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
|
|
||||||
Caches a function's return value each time it is called.
|
|
||||||
If called later with the same arguments, the cached value is returned
|
|
||||||
(not reevaluated).
|
|
||||||
|
|
||||||
Adds and removes handlers to root log for the duration of the function
|
|
||||||
call, or logs return of cached result.
|
|
||||||
"""
|
|
||||||
|
|
||||||
def __init__(self, func):
|
|
||||||
self.func = func
|
|
||||||
self.cache = {}
|
|
||||||
self.__name__ = func.__name__
|
|
||||||
|
|
||||||
def __call__(self, *args):
|
|
||||||
log_name = "{0}.{1}".format(
|
|
||||||
cclogging.get_object_namespace(args[0]), self.__name__)
|
|
||||||
self._start_logging(log_name)
|
|
||||||
|
|
||||||
try:
|
|
||||||
hash(args)
|
|
||||||
except TypeError: # unhashable arguments in args
|
|
||||||
value = self.func(*args)
|
|
||||||
debug = "Uncacheable. Data returned"
|
|
||||||
else:
|
|
||||||
if args in self.cache:
|
|
||||||
value = self.cache[args]
|
|
||||||
debug = "Cached data returned."
|
|
||||||
else:
|
|
||||||
value = self.cache[args] = self.func(*args)
|
|
||||||
debug = "Data cached for future calls"
|
|
||||||
|
|
||||||
self.func._log.debug(debug)
|
|
||||||
self._stop_logging()
|
|
||||||
return value
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
"""Return the function's docstring."""
|
|
||||||
return self.func.__doc__
|
|
||||||
|
|
||||||
def _start_logging(self, log_file_name):
|
|
||||||
"""Starts logging"""
|
|
||||||
setattr(self.func, '_log_handler', cclogging.setup_new_cchandler(
|
|
||||||
log_file_name))
|
|
||||||
setattr(self.func, '_log', cclogging.getLogger(''))
|
|
||||||
self.func._log.addHandler(self.func._log_handler)
|
|
||||||
try:
|
|
||||||
curframe = inspect.currentframe()
|
|
||||||
self.func._log.debug("{0} called from {1}".format(
|
|
||||||
self.__name__, inspect.getouterframes(curframe, 2)[2][3]))
|
|
||||||
except:
|
|
||||||
self.func._log.debug(
|
|
||||||
"Unable to log where {0} was called from".format(
|
|
||||||
self.__name__))
|
|
||||||
|
|
||||||
def _stop_logging(self):
|
|
||||||
"""Stop logging"""
|
|
||||||
self.func._log.removeHandler(self.func._log_handler)
|
|
@ -1,202 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
@summary: Base Classes for Test Fixtures
|
|
||||||
@note: Corresponds DIRECTLY TO A unittest.TestCase
|
|
||||||
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
|
|
||||||
"""
|
|
||||||
import os
|
|
||||||
import re
|
|
||||||
import six
|
|
||||||
import sys
|
|
||||||
import unittest
|
|
||||||
|
|
||||||
from cafe.drivers.base import FixtureReporter
|
|
||||||
|
|
||||||
|
|
||||||
class BaseTestFixture(unittest.TestCase):
|
|
||||||
"""
|
|
||||||
@summary: This should be used as the base class for any unittest tests,
|
|
||||||
meant to be used instead of unittest.TestCase.
|
|
||||||
@see: http://docs.python.org/library/unittest.html#unittest.TestCase
|
|
||||||
"""
|
|
||||||
|
|
||||||
def shortDescription(self):
|
|
||||||
"""
|
|
||||||
@summary: Returns a formatted description of the test
|
|
||||||
"""
|
|
||||||
short_desc = None
|
|
||||||
|
|
||||||
if os.environ.get("VERBOSE", None) == "true" and self._testMethodDoc:
|
|
||||||
temp = self._testMethodDoc.strip("\n")
|
|
||||||
short_desc = re.sub(r"[ ]{2,}", "", temp).strip("\n")
|
|
||||||
return short_desc
|
|
||||||
|
|
||||||
def logDescription(self):
|
|
||||||
"""
|
|
||||||
@summary: Returns a formatted description from the _testMethodDoc
|
|
||||||
"""
|
|
||||||
log_desc = None
|
|
||||||
if self._testMethodDoc:
|
|
||||||
log_desc = "\n{0}".format(
|
|
||||||
re.sub(r"[ ]{2,}", "", self._testMethodDoc).strip("\n"))
|
|
||||||
return log_desc
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def assertClassSetupFailure(cls, message):
|
|
||||||
"""
|
|
||||||
@summary: Use this if you need to fail from a Test Fixture's
|
|
||||||
setUpClass() method
|
|
||||||
"""
|
|
||||||
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
|
|
||||||
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def assertClassTeardownFailure(cls, message):
|
|
||||||
"""
|
|
||||||
@summary: Use this if you need to fail from a Test Fixture's
|
|
||||||
tearUpClass() method
|
|
||||||
"""
|
|
||||||
cls.fixture_log.error("FATAL: %s:%s", cls.__name__, message)
|
|
||||||
raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))
|
|
||||||
|
|
||||||
@classmethod
|
|
||||||
def setUpClass(cls):
|
|
||||||
"""@summary: Adds logging/reporting to Unittest setUpClass"""
|
|
||||||
super(BaseTestFixture, cls).setUpClass()
|
|
||||||
cls._reporter = FixtureReporter(cls)
|
|
||||||
cls.fixture_log = cls._reporter.logger.log
|
|
||||||
cls._reporter.start()
|
|
||||||
cls._class_cleanup_tasks = []
|
|
||||||
|
|
||||||
@classmethod
def tearDownClass(cls):
    """@summary: Adds stop reporting to Unittest tearDownClass"""
    cls._reporter.stop()
    # Call super teardown after to avoid tearing down the class before we
    # can run our own tear down stuff.
    super(BaseTestFixture, cls).tearDownClass()
|
|
||||||
|
|
||||||
def setUp(self):
    """@summary: Logs test metrics"""
    # NOTE(review): the return value of shortDescription() is discarded;
    # the call looks like a no-op here -- confirm it is intentional.
    self.shortDescription()
    # Record the start of this test for the metrics report, tagged with
    # class name, method name, and the formatted docstring description.
    self._reporter.start_test_metrics(
        self.__class__.__name__, self._testMethodName,
        self.logDescription())
    super(BaseTestFixture, self).setUp()
|
|
||||||
|
|
||||||
def tearDown(self):
    """
    @summary: Records this test's final status (Passed/Failed/ERRORED)
    with the metrics reporter before delegating to unittest's tearDown.

    @todo: This MUST be upgraded this from resultForDoCleanups into a
    better pattern or working with the result object directly.
    This is related to the todo in L{TestRunMetrics}
    """
    if sys.version_info < (3, 4):
        # Pre-3.4 unittest exposes the in-flight result object under
        # different private names on py2 vs. early py3.
        if six.PY2:
            report = self._resultForDoCleanups
        else:
            report = self._outcomeForDoCleanups

        if any(r for r in report.failures
               if self._test_name_matches_result(self._testMethodName, r)):
            self._reporter.stop_test_metrics(self._testMethodName,
                                             'Failed')
        elif any(r for r in report.errors
                 if self._test_name_matches_result(self._testMethodName,
                                                   r)):
            self._reporter.stop_test_metrics(self._testMethodName,
                                             'ERRORED')
        else:
            self._reporter.stop_test_metrics(self._testMethodName,
                                             'Passed')
    else:
        # 3.4+: walk the outcome's recorded errors looking for this test.
        for method, _ in self._outcome.errors:
            if self._test_name_matches_result(self._testMethodName,
                                              method):
                self._reporter.stop_test_metrics(self._testMethodName,
                                                 'Failed')
                # Bugfix: stop at the first match so the for/else below
                # does not also record this same test as 'Passed'.
                break
        else:
            self._reporter.stop_test_metrics(self._testMethodName,
                                             'Passed')

    # Let the base handle whatever hoodoo it needs
    super(BaseTestFixture, self).tearDown()
|
|
||||||
|
|
||||||
@staticmethod
def _test_name_matches_result(name, test_result):
    """@summary: Checks if a test result matches a specific test name."""
    # Pre-3.4 unittest hands us (test, traceback) tuples; later
    # versions pass the test object directly.
    if sys.version_info < (3, 4):
        try:
            candidate = test_result[0]
        except IndexError:
            return False
    else:
        candidate = test_result

    # Only genuine test objects carry _testMethodName; anything else
    # (e.g. an _ErrorHolder) can never match.
    if not hasattr(candidate, '_testMethodName'):
        return False
    return candidate._testMethodName == name
|
|
||||||
|
|
||||||
@classmethod
def _do_class_cleanup_tasks(cls):
    """@summary: Runs class cleanup tasks added during testing"""
    # Mirror unittest's addCleanup semantics: run in reverse
    # registration order.
    for task, task_args, task_kwargs in reversed(cls._class_cleanup_tasks):
        # Render "a, b" / "c=42" argument text once for both log calls.
        arg_text = ", ".join([str(arg) for arg in task_args])
        kwarg_text = ", ".join(["{0}={1}".format(
            str(k), str(task_kwargs[k])) for k in task_kwargs])
        cls.fixture_log.debug(
            "Running class cleanup task: %s(%s, %s)",
            task.__name__, arg_text, kwarg_text)
        try:
            task(*task_args, **task_kwargs)
        except Exception as exception:
            # Pretty prints method signature in the following format:
            # "classTearDown failure: Unable to execute FnName(a, b, c=42)"
            cls.fixture_log.exception(exception)
            cls.fixture_log.error(
                "classTearDown failure: Exception occured while trying to"
                " execute class teardown task: %s(%s, %s)",
                task.__name__, arg_text, kwarg_text)
|
|
||||||
|
|
||||||
@classmethod
def addClassCleanup(cls, function, *args, **kwargs):
    """@summary: Named to match unittest's addCleanup.
    ClassCleanup tasks run if setUpClass fails, or after tearDownClass.
    (They don't depend on tearDownClass running)
    """
    # Normalize empty args/kwargs exactly as the callers expect.
    task = (function, args or [], kwargs or {})
    cls._class_cleanup_tasks.append(task)
|
|
||||||
|
|
||||||
|
|
||||||
class BaseBurnInTestFixture(BaseTestFixture):
    """
    @summary: Base test fixture that allows for Burn-In tests
    """
    @classmethod
    def setUpClass(cls):
        """@summary: inits burning testing variables"""
        super(BaseBurnInTestFixture, cls).setUpClass()
        # Test cases registered via addTest(); iteration counter starts
        # at zero.  NOTE(review): nothing in this view increments
        # iterations or consumes test_list -- presumably the burn-in
        # driver does; confirm against the runner code.
        cls.test_list = []
        cls.iterations = 0

    @classmethod
    def addTest(cls, test_case):
        """@summary: Adds a test case"""
        cls.test_list.append(test_case)
|
|
@ -1,98 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
from unittest.suite import _ErrorHolder
|
|
||||||
import json
|
|
||||||
|
|
||||||
|
|
||||||
class SummarizeResults(object):
    """Reads in vars dict from suite and builds a Summarized results obj"""

    def __init__(self, result_dict, testsuite, execution_time):
        self.testsuite = testsuite
        self.execution_time = execution_time
        # TestSuite keeps its members in the private _tests attribute.
        self.all_tests = getattr(testsuite, "_tests", [])
        self.failures = result_dict.get("failures", [])
        self.skipped = result_dict.get("skipped", [])
        self.errors = result_dict.get("errors", [])
        self.tests_run = result_dict.get("testsRun", 0)

    def get_passed_tests(self):
        """Gets a list of results objects for passed tests"""
        # Split recorded errors into those attached to real test objects
        # and _ErrorHolder placeholders (class-level setup failures).
        errored_tests = []
        setup_errored_classes = []
        for err in self.errors:
            if isinstance(err[0], _ErrorHolder):
                setup_errored_classes.append(
                    str(err[0]).split(".")[-1].rstrip(')'))
            else:
                errored_tests.append(err[0])
        setup_errored_tests = [
            test for test in self.all_tests
            if test.__class__.__name__ in setup_errored_classes]

        # Passed == everything that is in no failed/skipped/errored bucket.
        not_passed = set(item[0] for item in self.failures)
        not_passed |= set(item[0] for item in self.skipped)
        not_passed |= set(errored_tests)
        not_passed |= set(setup_errored_tests)
        passed_tests = list(set(self.all_tests) - not_passed)

        return [self._create_result(test) for test in passed_tests]

    def summary_result(self):
        """Returns a dictionary containing counts of tests and statuses"""
        return dict(
            tests=self.tests_run,
            errors=len(self.errors),
            failures=len(self.failures),
            skipped=len(self.skipped))

    def gather_results(self):
        """Gets a result obj for all tests ran and failed setup classes"""
        collected = list(self.get_passed_tests())
        collected.extend(
            self._create_result(item, "failures") for item in self.failures)
        collected.extend(
            self._create_result(item, "errored") for item in self.errors)
        collected.extend(
            self._create_result(item, "skipped") for item in self.skipped)
        return collected

    @staticmethod
    def _create_result(test, type_="passed"):
        """Creates a Result object from a test and type of test"""
        msg_type = {"failures": "failure_trace", "skipped": "skipped_msg",
                    "errored": "error_trace"}
        trace_key = msg_type.get(type_, "error_trace")
        if type_ == "passed":
            kwargs = {
                "test_method_name": getattr(test, '_testMethodName', ""),
                "test_class_name": test.__class__.__name__}
        elif (type_ in ["failures", "skipped", "errored"] and
                not isinstance(test[0], _ErrorHolder)):
            # Normal (test, message) pair recorded against a test object.
            kwargs = {
                "test_method_name": getattr(test[0], '_testMethodName', ""),
                "test_class_name": test[0].__class__.__name__,
                trace_key: test[1]}
        else:
            # _ErrorHolder entries describe class-level failures; parse
            # "setUpClass (package.Class)" style ids into name parts.
            kwargs = {
                "test_method_name": str(test[0]).split(" ")[0],
                "test_class_name": str(test[0]).split(".")[-1].rstrip(')'),
                trace_key: test[1]}
        return Result(**kwargs)
|
|
||||||
|
|
||||||
|
|
||||||
class Result(object):
    """Result object used to create the json and xml results"""

    def __init__(
            self, test_class_name, test_method_name, failure_trace=None,
            skipped_msg=None, error_trace=None):
        # Assignment order is significant: __repr__ serializes __dict__,
        # so it fixes the key order of the emitted JSON.
        self.test_class_name = test_class_name
        self.test_method_name = test_method_name
        self.failure_trace = failure_trace
        self.skipped_msg = skipped_msg
        self.error_trace = error_trace

    def __repr__(self):
        # vars(self) is self.__dict__; serialized as the JSON record.
        return json.dumps(vars(self))
|
|
@ -1,4 +1,3 @@
|
|||||||
#!/usr/bin/env python
|
|
||||||
# Copyright 2015 Rackspace
|
# Copyright 2015 Rackspace
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||||
# not use this file except in compliance with the License. You may obtain
|
# not use this file except in compliance with the License. You may obtain
|
||||||
@ -14,7 +13,7 @@
|
|||||||
|
|
||||||
from __future__ import print_function
|
from __future__ import print_function
|
||||||
|
|
||||||
from multiprocessing import Process, Queue, active_children
|
from multiprocessing import Process, Queue
|
||||||
from StringIO import StringIO
|
from StringIO import StringIO
|
||||||
from unittest.runner import _WritelnDecorator
|
from unittest.runner import _WritelnDecorator
|
||||||
import importlib
|
import importlib
|
||||||
@ -29,7 +28,7 @@ from cafe.common.reporting import cclogging
|
|||||||
from cafe.common.reporting.reporter import Reporter
|
from cafe.common.reporting.reporter import Reporter
|
||||||
from cafe.configurator.managers import TestEnvManager
|
from cafe.configurator.managers import TestEnvManager
|
||||||
from cafe.drivers.unittest.arguments import ArgumentParser
|
from cafe.drivers.unittest.arguments import ArgumentParser
|
||||||
from cafe.drivers.unittest.common import print_exception, get_error
|
from cafe.drivers.base import print_exception, get_error
|
||||||
from cafe.drivers.unittest.parsers import SummarizeResults
|
from cafe.drivers.unittest.parsers import SummarizeResults
|
||||||
from cafe.drivers.unittest.suite_builder import SuiteBuilder
|
from cafe.drivers.unittest.suite_builder import SuiteBuilder
|
||||||
|
|
||||||
@ -68,14 +67,14 @@ class UnittestRunner(object):
|
|||||||
self.cl_args.data_directory or self.test_env.test_data_directory)
|
self.cl_args.data_directory or self.test_env.test_data_directory)
|
||||||
self.test_env.finalize()
|
self.test_env.finalize()
|
||||||
cclogging.init_root_log_handler()
|
cclogging.init_root_log_handler()
|
||||||
|
|
||||||
self.cl_args.testrepos = import_repos(self.cl_args.testrepos)
|
|
||||||
self.print_configuration(self.test_env, self.cl_args.testrepos)
|
self.print_configuration(self.test_env, self.cl_args.testrepos)
|
||||||
|
self.cl_args.testrepos = import_repos(self.cl_args.testrepos)
|
||||||
|
|
||||||
self.suites = SuiteBuilder(
|
self.suites = SuiteBuilder(
|
||||||
testrepos=self.cl_args.testrepos,
|
testrepos=self.cl_args.testrepos,
|
||||||
tags=self.cl_args.tags,
|
tags=self.cl_args.tags,
|
||||||
all_tags=self.cl_args.all_tags,
|
all_tags=self.cl_args.all_tags,
|
||||||
dotpath_regex=self.cl_args.dotpath_regex,
|
regex_list=self.cl_args.regex_list,
|
||||||
file_=self.cl_args.file,
|
file_=self.cl_args.file,
|
||||||
dry_run=self.cl_args.dry_run,
|
dry_run=self.cl_args.dry_run,
|
||||||
exit_on_error=self.cl_args.exit_on_error).get_suites()
|
exit_on_error=self.cl_args.exit_on_error).get_suites()
|
||||||
@ -97,31 +96,23 @@ class UnittestRunner(object):
|
|||||||
to_worker.put(None)
|
to_worker.put(None)
|
||||||
|
|
||||||
start = time.time()
|
start = time.time()
|
||||||
|
|
||||||
|
# A second try catch is needed here because queues can cause locking
|
||||||
|
# when they go out of scope, especially when termination signals used
|
||||||
try:
|
try:
|
||||||
for _ in range(workers):
|
for _ in range(workers):
|
||||||
proc = Consumer(to_worker, from_worker, verbose, failfast)
|
proc = Consumer(to_worker, from_worker, verbose, failfast)
|
||||||
worker_list.append(proc)
|
worker_list.append(proc)
|
||||||
proc.start()
|
proc.start()
|
||||||
|
|
||||||
while active_children():
|
for _ in self.suites:
|
||||||
if from_worker.qsize():
|
|
||||||
results.append(self.log_result(from_worker.get()))
|
|
||||||
|
|
||||||
while not from_worker.empty():
|
|
||||||
results.append(self.log_result(from_worker.get()))
|
results.append(self.log_result(from_worker.get()))
|
||||||
|
|
||||||
tests_run, errors, failures = self.compile_results(
|
tests_run, errors, failures = self.compile_results(
|
||||||
time.time() - start, results)
|
time.time() - start, results)
|
||||||
except KeyboardInterrupt:
|
except KeyboardInterrupt:
|
||||||
for proc in worker_list:
|
|
||||||
try:
|
|
||||||
os.kill(proc.pid, 9)
|
|
||||||
except:
|
|
||||||
# Process already exited, control C signal hit process
|
|
||||||
# when not in a test
|
|
||||||
pass
|
|
||||||
print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
|
print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
|
||||||
exit(get_error())
|
os.killpg(0, 9)
|
||||||
return bool(sum([errors, failures, not tests_run]))
|
return bool(sum([errors, failures, not tests_run]))
|
||||||
|
|
||||||
@staticmethod
|
@staticmethod
|
||||||
@ -145,9 +136,9 @@ class UnittestRunner(object):
|
|||||||
print("Percolated Configuration")
|
print("Percolated Configuration")
|
||||||
print("-" * 150)
|
print("-" * 150)
|
||||||
if repos:
|
if repos:
|
||||||
print("BREWING FROM: ....: {0}".format(repos[0].__name__))
|
print("BREWING FROM: ....: {0}".format(repos[0]))
|
||||||
for repo in repos[1:]:
|
for repo in repos[1:]:
|
||||||
print("{0}{1}".format(" " * 20, repo.__name__))
|
print("{0}{1}".format(" " * 20, repo))
|
||||||
print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
|
print("ENGINE CONFIG FILE: {0}".format(test_env.engine_config_path))
|
||||||
print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
|
print("TEST CONFIG FILE..: {0}".format(test_env.test_config_file_path))
|
||||||
print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
|
print("DATA DIRECTORY....: {0}".format(test_env.test_data_directory))
|
||||||
@ -181,8 +172,9 @@ class UnittestRunner(object):
|
|||||||
result_dict = {"tests": 0, "errors": 0, "failures": 0}
|
result_dict = {"tests": 0, "errors": 0, "failures": 0}
|
||||||
for dic in results:
|
for dic in results:
|
||||||
result = dic["result"]
|
result = dic["result"]
|
||||||
suite = dic["suite"]
|
tests = [suite for suite in self.suites
|
||||||
result_parser = SummarizeResults(vars(result), suite, run_time)
|
if suite.cafe_uuid == dic["cafe_uuid"]][0]
|
||||||
|
result_parser = SummarizeResults(vars(result), tests, run_time)
|
||||||
all_results += result_parser.gather_results()
|
all_results += result_parser.gather_results()
|
||||||
summary = result_parser.summary_result()
|
summary = result_parser.summary_result()
|
||||||
for key in result_dict:
|
for key in result_dict:
|
||||||
@ -252,11 +244,19 @@ class Consumer(Process):
|
|||||||
record.msg = "{0}\n{1}".format(
|
record.msg = "{0}\n{1}".format(
|
||||||
record.msg, traceback.format_exc(record.exc_info))
|
record.msg, traceback.format_exc(record.exc_info))
|
||||||
record.exc_info = None
|
record.exc_info = None
|
||||||
dic = {"result": result, "logs": handler._records, "suite": suite}
|
dic = {
|
||||||
|
"result": result,
|
||||||
|
"logs": handler._records,
|
||||||
|
"cafe_uuid": suite.cafe_uuid}
|
||||||
|
|
||||||
self.from_worker.put(dic)
|
self.from_worker.put(dic)
|
||||||
|
|
||||||
|
|
||||||
def entry_point():
|
def entry_point():
|
||||||
"""Function setup.py links cafe-runner to"""
|
"""Function setup.py links cafe-runner to"""
|
||||||
runner = UnittestRunner()
|
try:
|
||||||
exit(runner.run())
|
runner = UnittestRunner()
|
||||||
|
exit(runner.run())
|
||||||
|
except KeyboardInterrupt:
|
||||||
|
print_exception("Runner", "run", "Keyboard Interrupt, exiting...")
|
||||||
|
os.killpg(0, 9)
|
||||||
|
@ -1,89 +0,0 @@
|
|||||||
# Copyright 2015 Rackspace
|
|
||||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
||||||
# not use this file except in compliance with the License. You may obtain
|
|
||||||
# a copy of the License at
|
|
||||||
#
|
|
||||||
# http://www.apache.org/licenses/LICENSE-2.0
|
|
||||||
#
|
|
||||||
# Unless required by applicable law or agreed to in writing, software
|
|
||||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
||||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
||||||
# License for the specific language governing permissions and limitations
|
|
||||||
# under the License.
|
|
||||||
|
|
||||||
"""
|
|
||||||
Contains a monkeypatched version of unittest's TestSuite class that supports
|
|
||||||
a version of addCleanup that can be used in classmethods. This allows a
|
|
||||||
more granular approach to teardown to be used in setUpClass and classmethod
|
|
||||||
helper methods
|
|
||||||
"""
|
|
||||||
|
|
||||||
from unittest.suite import TestSuite, _DebugResult, util
|
|
||||||
|
|
||||||
|
|
||||||
class OpenCafeUnittestTestSuite(TestSuite):
    """TestSuite subclass whose class-level setup/teardown hooks also run
    the fixture's _do_class_cleanup_tasks(), so addClassCleanup tasks fire
    even when tearDownClass is missing or setUpClass fails."""

    def _tearDownPreviousClass(self, test, result):
        # Mirrors unittest.TestSuite._tearDownPreviousClass with cleanup
        # hooks added; the guard clauses match the stock implementation.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if getattr(previousClass, '_classSetupFailed', False):
            return
        if getattr(result, '_moduleSetUpFailed', False):
            return
        if getattr(previousClass, "__unittest_skip__", False):
            return

        tearDownClass = getattr(previousClass, 'tearDownClass', None)
        if tearDownClass is not None:
            try:
                tearDownClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                className = util.strclass(previousClass)
                errorName = 'tearDownClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
            # Monkeypatch: run class cleanup tasks regardless of whether
            # tearDownClass succeeds or not
            finally:
                if hasattr(previousClass, '_do_class_cleanup_tasks'):
                    previousClass._do_class_cleanup_tasks()

        # Monkeypatch: run class cleanup tasks regardless of whether
        # tearDownClass exists or not
        else:
            if getattr(previousClass, '_do_class_cleanup_tasks', False):
                previousClass._do_class_cleanup_tasks()

    def _handleClassSetUp(self, test, result):
        # Mirrors unittest.TestSuite._handleClassSetUp, with cleanup run
        # when setUpClass raises.
        previousClass = getattr(result, '_previousTestClass', None)
        currentClass = test.__class__
        if currentClass == previousClass:
            return
        if result._moduleSetUpFailed:
            return
        if getattr(currentClass, "__unittest_skip__", False):
            return

        try:
            currentClass._classSetupFailed = False
        except TypeError:
            # test may actually be a function
            # so its class will be a builtin-type
            pass

        setUpClass = getattr(currentClass, 'setUpClass', None)
        if setUpClass is not None:
            try:
                setUpClass()
            except Exception as e:
                if isinstance(result, _DebugResult):
                    raise
                currentClass._classSetupFailed = True
                className = util.strclass(currentClass)
                errorName = 'setUpClass (%s)' % className
                self._addClassOrModuleLevelException(result, e, errorName)
                # Monkeypatch: Run class cleanup if setUpClass fails
                currentClass._do_class_cleanup_tasks()
|
|
@ -52,9 +52,9 @@ class PositiveDataGenerator(DatasetList):
|
|||||||
"arg_update": ["--result-directory", "/"],
|
"arg_update": ["--result-directory", "/"],
|
||||||
"update": {"result_directory": "/"}})
|
"update": {"result_directory": "/"}})
|
||||||
|
|
||||||
self.append_new_dataset("dotpath_regex", {
|
self.append_new_dataset("regex_list", {
|
||||||
"arg_update": ["-d", ".*", "..."],
|
"arg_update": ["-d", ".*", "..."],
|
||||||
"update": {"dotpath_regex": [".*", "..."]}})
|
"update": {"regex_list": [".*", "..."]}})
|
||||||
|
|
||||||
self.append_new_dataset("dry_run", {
|
self.append_new_dataset("dry_run", {
|
||||||
"arg_update": ["--dry-run"],
|
"arg_update": ["--dry-run"],
|
||||||
@ -113,7 +113,7 @@ class ArgumentsTests(unittest.TestCase):
|
|||||||
"""ArgumentParser Tests"""
|
"""ArgumentParser Tests"""
|
||||||
good_package = "tests.repo"
|
good_package = "tests.repo"
|
||||||
bad_package = "tests.fakerepo"
|
bad_package = "tests.fakerepo"
|
||||||
good_module = "tests.repo.test_module"
|
good_module = "tests.repo.cafe_tests"
|
||||||
bad_module = "tests.repo.blah"
|
bad_module = "tests.repo.blah"
|
||||||
bad_path = "tests."
|
bad_path = "tests."
|
||||||
good_config = CONFIG_NAME
|
good_config = CONFIG_NAME
|
||||||
@ -125,7 +125,7 @@ class ArgumentsTests(unittest.TestCase):
|
|||||||
"tags": [],
|
"tags": [],
|
||||||
"all_tags": False,
|
"all_tags": False,
|
||||||
"data_directory": None,
|
"data_directory": None,
|
||||||
"dotpath_regex": [],
|
"regex_list": [],
|
||||||
"dry_run": False,
|
"dry_run": False,
|
||||||
"exit_on_error": False,
|
"exit_on_error": False,
|
||||||
"failfast": False,
|
"failfast": False,
|
||||||
@ -142,8 +142,8 @@ class ArgumentsTests(unittest.TestCase):
|
|||||||
def setUpClass(cls):
|
def setUpClass(cls):
|
||||||
super(ArgumentsTests, cls).setUpClass()
|
super(ArgumentsTests, cls).setUpClass()
|
||||||
file_ = open(cls.config, "w")
|
file_ = open(cls.config, "w")
|
||||||
file_.write("test_fail (tests.repo.test_module.NoDataGenerator)\n")
|
file_.write("test_fail (tests.repo.cafe_tests.NoDataGenerator)\n")
|
||||||
file_.write("test_pass (tests.repo.test_module.NoDataGenerator)\n")
|
file_.write("test_pass (tests.repo.cafe_tests.NoDataGenerator)\n")
|
||||||
file_.close()
|
file_.close()
|
||||||
|
|
||||||
def get_updated_expected(self, **kwargs):
|
def get_updated_expected(self, **kwargs):
|
||||||
|
Loading…
x
Reference in New Issue
Block a user