Merge "Add skipped tests to output of parallel runner"
This commit is contained in:
commit
fd5391ce74
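The change adds a "skipped" counter to the totals that compile_results builds and rewrites print_results to collect the non-zero failure, skip, and error counts into a single list, so the summary line reads, for example, FAILED (failures=7, skipped=8, errors=9), or PASSED (skipped=4) when only skips occurred. A new metatest module covers each combination.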
@@ -180,7 +180,7 @@ class UnittestRunner(object):
     def compile_results(self, run_time, datagen_time, results):
         """Summarizes results and writes results to file if --result used"""
         all_results = []
-        result_dict = {"tests": 0, "errors": 0, "failures": 0}
+        result_dict = {"tests": 0, "errors": 0, "failures": 0, "skipped": 0}
         for dic in results:
             result = dic["result"]
             tests = [suite for suite in self.suites
@@ -205,7 +205,8 @@ class UnittestRunner(object):
         return self.print_results(
             run_time=run_time, datagen_time=datagen_time, **result_dict)

-    def print_results(self, tests, errors, failures, run_time, datagen_time):
+    def print_results(self, tests, errors, failures, skipped,
+                      run_time, datagen_time):
         """Prints results summerized in compile_results messages"""
         print("{0}".format("-" * 70))
         print("Ran {0} test{1} in {2:.3f}s".format(
@@ -213,11 +214,18 @@ class UnittestRunner(object):
         print("Generated datasets in {0:.3f}s".format(datagen_time))
         print("Total runtime {0:.3f}s".format(run_time + datagen_time))

-        if failures or errors:
-            print("\nFAILED ({0}{1}{2})".format(
-                "failures={0}".format(failures) if failures else "",
-                ", " if failures and errors else "",
-                "errors={0}".format(errors) if errors else ""))
+        results = []
+        if failures:
+            results.append("failures={0}".format(failures))
+        if skipped:
+            results.append("skipped={0}".format(skipped))
+        if errors:
+            results.append("errors={0}".format(errors))
+
+        status = "FAILED" if failures or errors else "PASSED"
+        print("\n{} ".format(status), end="")
+        if results:
+            print("({})".format(", ".join(results)))
         print("{0}\nDetailed logs: {1}\n{2}".format(
             "=" * 150, self.test_env.test_log_dir, "-" * 150))
         return tests, errors, failures
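Pulled out of the diff for illustration, the new summary logic is easy to exercise on its own. A minimal sketch (summarize below is a hypothetical stand-in for the formatting portion of print_results, not a function in the runner):

def summarize(failures, skipped, errors):
    # Mirror the patched print_results: keep only the non-zero counts.
    results = []
    if failures:
        results.append("failures={0}".format(failures))
    if skipped:
        results.append("skipped={0}".format(skipped))
    if errors:
        results.append("errors={0}".format(errors))
    # Skips alone do not fail the run; only failures or errors do.
    status = "FAILED" if failures or errors else "PASSED"
    return "{0} ({1})".format(status, ", ".join(results)) if results else status

print(summarize(7, 8, 9))  # FAILED (failures=7, skipped=8, errors=9)
print(summarize(0, 4, 0))  # PASSED (skipped=4)
print(summarize(0, 0, 0))  # PASSED

Note that a run with skips but no failures or errors still reports PASSED; the skip count only annotates the status line.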
tests/drivers/unittest/test_runner_parallel.py (new file, 95 lines)

@@ -0,0 +1,95 @@
+# Copyright 2016 Rackspace
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+import argparse
+import mock
+import os
+import sys
+import unittest
+
+from contextlib import contextmanager
+from six import StringIO
+
+from cafe.drivers.unittest import runner_parallel
+
+
+@contextmanager
+def capture(command, *args, **kwargs):
+    out, sys.stdout = sys.stdout, StringIO()
+    command(*args, **kwargs)
+    sys.stdout.seek(0)
+    yield sys.stdout.read()
+    sys.stdout = out
+
+
+class TestUnittestRunner(runner_parallel.UnittestRunner):
+    def __init__(self):
+        self.test_env = mock.Mock(test_log_dir='test_dir')
+
+
+class ParallelRunnerTests(unittest.TestCase):
+    """Metatests for the Parrallel Runner."""
+    def setUp(self):
+        self.runner = TestUnittestRunner()
+        self.run_time = 10.5
+        self.datagen_time = 180.4
+
+    def test_skipped_results_show_when_present(self):
+        """Check that skipped test count prints in results if present."""
+
+        with capture(self.runner.print_results, tests=200, errors=0,
+                     failures=0, skipped=4, run_time=self.run_time,
+                     datagen_time=self.datagen_time) as output:
+            expected_text = "PASSED (skipped=4)\n"
+
+            self.assertIn(expected_text, output)
+
+    def test_error_results_show_when_present(self):
+        """Check that errored test count prints in results if present."""
+
+        with capture(self.runner.print_results, tests=200, errors=5,
+                     failures=0, skipped=0, run_time=self.run_time,
+                     datagen_time=self.datagen_time) as output:
+            expected_text = "FAILED (errors=5)\n"
+
+            self.assertIn(expected_text, output)
+
+    def test_failures_results_show_when_present(self):
+        """Check that errored test count prints in results if present."""
+
+        with capture(self.runner.print_results, tests=200, errors=0,
+                     failures=6, skipped=0, run_time=self.run_time,
+                     datagen_time=self.datagen_time) as output:
+            expected_text = "FAILED (failures=6)\n"
+
+            self.assertIn(expected_text, output)
+
+    def test_all_failed_results_show_when_present(self):
+        """Check that errored test count prints in results if present."""
+
+        with capture(self.runner.print_results, tests=200, errors=9,
+                     failures=7, skipped=8, run_time=self.run_time,
+                     datagen_time=self.datagen_time) as output:
+            expected_text = "FAILED (failures=7, skipped=8, errors=9)\n"
+
+            self.assertIn(expected_text, output)
+
+    def test_shows_passed_when_no_failures_skips_or_errors(self):
+        """Check that errored test count prints in results if present."""
+
+        with capture(self.runner.print_results, tests=200, errors=0,
+                     failures=0, skipped=0, run_time=self.run_time,
+                     datagen_time=self.datagen_time) as output:
+            expected_text = "PASSED"
+
+            self.assertIn(expected_text, output)
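Two notes on the test scaffolding. TestUnittestRunner overrides __init__ so print_results can be called without constructing a real runner; the only attribute the method touches is test_env.test_log_dir, for the "Detailed logs" line. And the capture helper swaps sys.stdout for a StringIO by hand; on Python 3 the same capture could use the standard library's contextlib.redirect_stdout instead. A sketch of that alternative (capture_stdout is hypothetical, not part of this commit):

import contextlib
import io

def capture_stdout(command, *args, **kwargs):
    # Run command with stdout redirected and return everything it printed.
    buf = io.StringIO()
    with contextlib.redirect_stdout(buf):
        command(*args, **kwargs)
    return buf.getvalue()

# output = capture_stdout(runner.print_results, tests=200, errors=0,
#                         failures=0, skipped=4, run_time=10.5,
#                         datagen_time=180.4)
# assert "PASSED (skipped=4)" in output

Unlike the hand-rolled version, which skips the sys.stdout = out restore if command raises, redirect_stdout restores stdout unconditionally on exit.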