@@ -25,6 +25,12 @@ import tempfile
 import re
 import logging
 
+BOLD = ("", "")
+if os.name == 'posix':
+    # primitive formatting on supported
+    # terminal via ANSI escape sequences:
+    BOLD = ('\033[0m', '\033[1m')
+
 TEST_EXIT_PASSED = 0
 TEST_EXIT_SKIPPED = 77
 
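(Aside, not part of the patch: a minimal sketch of how the module-level BOLD tuple is meant to be used. BOLD[0] is the ANSI reset sequence and BOLD[1] switches bold on, so BOLD[1] + text + BOLD[0] renders bold on POSIX terminals and passes the text through unchanged elsewhere, where both entries stay empty strings. The sample string is illustrative only.)

    import os

    BOLD = ("", "")
    if os.name == 'posix':
        BOLD = ('\033[0m', '\033[1m')  # (reset, bold-on)

    # Bold on ANSI-capable terminals, plain text everywhere else.
    print(BOLD[1] + "TEST RESULTS" + BOLD[0])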
@@ -231,11 +237,6 @@ def main():
     run_tests(test_list, config["environment"]["SRCDIR"], config["environment"]["BUILDDIR"], config["environment"]["EXEEXT"], args.jobs, args.coverage, passon_args)
 
 def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=False, args=[]):
-    BOLD = ("","")
-    if os.name == 'posix':
-        # primitive formatting on supported
-        # terminal via ANSI escape sequences:
-        BOLD = ('\033[0m', '\033[1m')
 
     #Set env vars
     if "BITCOIND" not in os.environ:
@@ -258,33 +259,26 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
         subprocess.check_output([tests_dir + 'create_cache.py'] + flags)
 
     #Run Tests
-    all_passed = True
-    time_sum = 0
-    time0 = time.time()
-
     job_queue = TestHandler(jobs, tests_dir, test_list, flags)
+    time0 = time.time()
+    test_results = []
 
     max_len_name = len(max(test_list, key=len))
-    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
+
     for _ in range(len(test_list)):
-        (name, stdout, stderr, status, duration) = job_queue.get_next()
-        all_passed = all_passed and status != "Failed"
-        time_sum += duration
-
-        if status == "Passed":
-            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], name, BOLD[0], duration))
-        elif status == "Skipped":
-            logging.debug("\n%s%s%s skipped" % (BOLD[1], name, BOLD[0]))
+        test_result, stdout, stderr = job_queue.get_next()
+        test_results.append(test_result)
+
+        if test_result.status == "Passed":
+            logging.debug("\n%s%s%s passed, Duration: %s s" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
+        elif test_result.status == "Skipped":
+            logging.debug("\n%s%s%s skipped" % (BOLD[1], test_result.name, BOLD[0]))
         else:
-            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], name, BOLD[0], duration))
+            print("\n%s%s%s failed, Duration: %s s\n" % (BOLD[1], test_result.name, BOLD[0], test_result.time))
             print(BOLD[1] + 'stdout:\n' + BOLD[0] + stdout + '\n')
             print(BOLD[1] + 'stderr:\n' + BOLD[0] + stderr + '\n')
 
-        results += "%s | %s | %s s\n" % (name.ljust(max_len_name), status.ljust(7), duration)
-
-    results += BOLD[1] + "\n%s | %s | %s s (accumulated)" % ("ALL".ljust(max_len_name), str(all_passed).ljust(7), time_sum) + BOLD[0]
-    print(results)
-    print("\nRuntime: %s s" % (int(time.time() - time0)))
+    print_results(test_results, max_len_name, (int(time.time() - time0)))
 
     if coverage:
         coverage.report_rpc_coverage()
@@ -292,8 +286,27 @@ def run_tests(test_list, src_dir, build_dir, exeext, jobs=1, enable_coverage=Fal
         logging.debug("Cleaning up coverage data")
         coverage.cleanup()
 
+    all_passed = all(map(lambda test_result: test_result.status == "Passed", test_results))
+
     sys.exit(not all_passed)
 
+def print_results(test_results, max_len_name, runtime):
+    results = "\n" + BOLD[1] + "%s | %s | %s\n\n" % ("TEST".ljust(max_len_name), "STATUS ", "DURATION") + BOLD[0]
+
+    test_results.sort(key=lambda result: result.name.lower())
+    all_passed = True
+    time_sum = 0
+
+    for test_result in test_results:
+        all_passed = all_passed and test_result.status != "Failed"
+        time_sum += test_result.time
+        test_result.padding = max_len_name
+        results += str(test_result)
+
+    results += BOLD[1] + "\n%s | %s | %s s (accumulated) \n" % ("ALL".ljust(max_len_name), str(all_passed).ljust(7), time_sum) + BOLD[0]
+    results += "Runtime: %s s\n" % (runtime)
+    print(results)
+
 class TestHandler:
     """
     Trigger the testscrips passed in via the list.
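(Aside, not part of the patch: the exit-status idiom above in sketch form. all(map(...)) is True only when every TestResult reports "Passed" (TestResult itself is introduced in the next hunk), and sys.exit(not all_passed) maps that boolean onto a process exit code, since False is 0 and True is 1. The test names below are made up.)

    test_results = [TestResult("abc.py", "Passed", 10), TestResult("xyz.py", "Failed", 5)]
    all_passed = all(tr.status == "Passed" for tr in test_results)
    sys.exit(not all_passed)  # all passed -> exit status 0, any failure -> exit status 1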
@@ -348,9 +361,32 @@ class TestHandler:
                         status = "Failed"
                     self.num_running -= 1
                     self.jobs.remove(j)
-                    return name, stdout, stderr, status, int(time.time() - time0)
+
+                    return TestResult(name, status, int(time.time() - time0)), stdout, stderr
             print('.', end='', flush=True)
+
+class TestResult():
+    def __init__(self, name, status, time):
+        self.name = name
+        self.status = status
+        self.time = time
+        self.padding = 0
+
+    def __repr__(self):
+        COLOR = ("", "")
+        if os.name == 'posix':
+            # primitive formatting on supported
+            # terminal via ANSI escape sequences:
+            if self.status == "Passed":
+                COLOR = ('\033[0m', '\033[0;34m')
+            elif self.status == "Failed":
+                COLOR = ('\033[0m', '\033[0;31m')
+            elif self.status == "Skipped":
+                COLOR = ('\033[0m', '\033[1;30m')
+
+        return COLOR[1] + "%s | %s | %s s\n" % (self.name.ljust(self.padding), self.status.ljust(7), self.time) + COLOR[0]
+
 
 def check_script_list(src_dir):
     """Check scripts directory.
 
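(Aside, not part of the patch: how the new pieces fit together, as a small standalone sketch that assumes the TestResult class and print_results function from the hunks above; the test names and durations are made up. Each TestResult renders itself as one colour-coded table row via __repr__, and print_results sorts the rows by name, pads them to the longest test name, and appends the accumulated "ALL" line plus the overall runtime.)

    test_results = [
        TestResult("wallet-hd.py", "Passed", 21),
        TestResult("zapwallettxes.py", "Skipped", 0),
        TestResult("p2p-compactblocks.py", "Failed", 8),
    ]
    max_len_name = max(len(r.name) for r in test_results)
    print_results(test_results, max_len_name, runtime=34)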