Skip to content

Commit eda3919

Browse files
authored
Merge pull request #850 from mnordsletten/test_improvements
test.py improvements
2 parents 822d138 + 7e6c62d commit eda3919

1 file changed

Lines changed: 184 additions & 92 deletions

File tree

test/test.py

Lines changed: 184 additions & 92 deletions
Original file line numberDiff line numberDiff line change
@@ -15,6 +15,9 @@
1515

1616
startdir = os.getcwd()
1717

18+
test_categories = ['fs', 'hw', 'kernel', 'mod', 'net', 'performance', 'platform', 'stl', 'util']
19+
test_types = ['integration', 'stress', 'unit', 'examples']
20+
1821
"""
1922
Script used for running all the valid tests in the terminal.
2023
"""
@@ -41,53 +44,67 @@
4144

4245
test_count = 0
4346

44-
def print_skipped(name, reason):
45-
print pretty.WARNING("* Skipping " + name)
46-
print " Reason: {0:40}".format(reason)
47-
48-
def valid_tests(subfolder=None):
49-
"""Returns a list of all the valid integration tests in */integration/*"""
50-
if "integration" in args.skip:
51-
return []
52-
53-
skip_json = json.loads(open("skipped_tests.json").read())
54-
for skip in skip_json:
55-
print_skipped(skip["name"], skip["reason"])
56-
args.skip.append(skip["name"])
57-
58-
print
59-
60-
valid_tests = [ x for x in validate_all.valid_tests() if not x in args.skip ]
61-
62-
if subfolder:
63-
return [x for x in valid_tests if x.split('/')[0] == subfolder]
64-
65-
if args.tests:
66-
return [x for x in valid_tests if x.split("/")[-1] in args.tests]
67-
68-
return valid_tests
47+
def print_skipped(tests):
48+
for test in tests:
49+
if test.skip_:
50+
print pretty.WARNING("* Skipping " + test.name_)
51+
print " Reason: {0:40}".format(test.skip_reason_)
6952

7053

7154
class Test:
7255
""" A class to start a test as a subprocess and pretty-print status """
73-
def __init__(self, path, clean = False, command = ['sudo', '-E', 'python', 'test.py'], name = None):
56+
def __init__(self, path, clean=False, command=['sudo', '-E', 'python', 'test.py'], name=None):
7457

7558
self.command_ = command
7659
self.proc_ = None
7760
self.path_ = path
7861
self.output_ = None
7962

80-
if (name == None):
63+
# Extract category and type from the path variable
64+
# Category is linked to the top level folder e.g. net, fs, hw
65+
# Type is linked to the type of test e.g. integration, unit, stress
66+
if self.path_ == 'stress':
67+
self.category_ = 'stress'
68+
self.type_ = 'stress'
69+
elif self.path_ == 'examples':
70+
self.category_ = 'examples'
71+
self.type_ = 'examples'
72+
elif self.path_ == 'mod/gsl':
73+
self.category_ = 'mod'
74+
self.type_ = 'mod'
75+
elif self.path_ == '.':
76+
self.category_ = 'unit'
77+
self.type_ = 'unit'
78+
else:
79+
self.category_ = self.path_.split('/')[-3]
80+
self.type_ = self.path_.split('/')[-2]
81+
82+
if not name:
8183
self.name_ = path
8284
else:
8385
self.name_ = name
8486

85-
print pretty.INFO("Test"), "starting", self.name_
87+
# Check if the test is valid or not
88+
self.check_valid()
8689

8790
if clean:
8891
subprocess.check_output(["make","clean"])
8992
print pretty.C_GRAY + "\t Cleaned, now start... ", pretty.C_ENDC
9093

94+
def __str__(self):
95+
""" Print output about the test object """
96+
97+
return ('name_: {x[name_]} \n'
98+
'path_: {x[path_]} \n'
99+
'command_: {x[command_]} \n'
100+
'proc_: {x[proc_]} \n'
101+
'output_: {x[output_]} \n'
102+
'category_: {x[category_]} \n'
103+
'type_: {x[type_]} \n'
104+
'skip: {x[skip_]} \n'
105+
'skip_reason: {x[skip_reason_]} \n'
106+
).format(x=self.__dict__)
107+
91108
def start(self):
92109
os.chdir(startdir + "/" + self.path_)
93110
self.proc_ = subprocess.Popen(self.command_, shell=False,
@@ -118,41 +135,39 @@ def wait_status(self):
118135

119136
return self.proc_.returncode
120137

138+
def check_valid(self):
139+
""" Will check if a test is valid. A test is valid only if all of the following hold:
140+
1. Contains the files required
141+
2. Not listed in the skipped_tests.json
142+
3. Not listed in the args.skip cmd line argument
121143
122-
123-
def integration_tests(subfolder=None):
124-
"""
125-
Loops over all valid tests as defined by ./validate_all.py. Runs them one by one and gives an update of the statuses at the end.
144+
Arguments:
145+
self: the Test instance being validated
126146
"""
127-
global test_count
128-
if subfolder:
129-
valid = valid_tests(subfolder)
130-
else:
131-
valid = valid_tests()
132-
if not valid:
133-
print pretty.WARNING("Integration tests skipped")
134-
return 0
135-
136-
test_count += len(valid)
137-
print pretty.HEADER("Starting " + str(len(valid)) + " integration test(s)")
138-
processes = []
139-
140-
fail_count = 0
141-
for path in valid:
142-
processes.append(Test(path, clean = args.clean).start())
143-
144-
# Collect test results
145-
print pretty.HEADER("Collecting integration test results")
146-
147-
for p in processes:
148-
fail_count += 1 if p.wait_status() else 0
149-
150-
# Exit early if any tests failed
151-
if fail_count and args.fail:
152-
print pretty.FAIL(str(fail_count) + "integration tests failed")
153-
sys.exit(fail_count)
154-
155-
return fail_count
147+
# Test 1
148+
if not validate_test.validate_path(self.path_, verb = False):
149+
self.skip_ = True
150+
self.skip_reason_ = 'Failed validate_test, missing files'
151+
return
152+
153+
# Test 2
154+
# Figure out if the test should be skipped
155+
skip_json = json.loads(open("skipped_tests.json").read())
156+
for skip in skip_json:
157+
if self.path_ == skip['name']:
158+
self.skip_ = True
159+
self.skip_reason_ = 'Defined in skipped_tests.json'
160+
return
161+
162+
# Test 3
163+
if self.path_ in args.skip or self.category_ in args.skip:
164+
self.skip_ = True
165+
self.skip_reason_ = 'Defined by cmd line argument'
166+
return
167+
168+
self.skip_ = False
169+
self.skip_reason_ = None
170+
return
156171

157172

158173
def unit_tests():
@@ -172,6 +187,7 @@ def unit_tests():
172187

173188
return max(build_status, unit_status)
174189

190+
175191
def stress_test():
176192
"""Perform stresstest"""
177193
global test_count
@@ -192,6 +208,7 @@ def stress_test():
192208

193209
return 1 if stress.wait_status() else 0
194210

211+
195212
def examples_working():
196213
global test_count
197214
if ("examples" in args.skip):
@@ -211,47 +228,122 @@ def examples_working():
211228
fail_count += 1 if build or run else 0
212229
return fail_count
213230

231+
232+
def integration_tests(tests):
233+
""" Function that runs the tests that are passed to it.
234+
Filters out any invalid tests before running
235+
236+
Arguments:
237+
tests: List containing test objects to be run
238+
239+
Returns:
240+
integer: Number of tests that failed
241+
"""
242+
243+
# Only run the valid tests
244+
tests = [ x for x in tests if not x.skip_ and x.type_ == 'integration' ]
245+
246+
# Print info before starting to run
247+
print pretty.HEADER("Starting " + str(len(tests)) + " integration test(s)")
248+
for test in tests:
249+
print pretty.INFO("Test"), "starting", test.name_
250+
251+
processes = []
252+
fail_count = 0
253+
global test_count
254+
test_count += len(tests)
255+
256+
# Start running tests in parallel
257+
for test in tests:
258+
processes.append(test.start())
259+
260+
# Collect test results
261+
print pretty.HEADER("Collecting integration test results")
262+
263+
for p in processes:
264+
fail_count += 1 if p.wait_status() else 0
265+
266+
# Exit early if any tests failed
267+
if fail_count and args.fail:
268+
print pretty.FAIL(str(fail_count) + "integration tests failed")
269+
sys.exit(fail_count)
270+
271+
return fail_count
272+
273+
274+
def find_leaf_nodes():
275+
""" Used to find all leaf nodes in the test directory,
276+
this is to help identify all possible test directories.
277+
Only looks in folders that actually store tests
278+
279+
Returns:
280+
List: list of string with path to all leaf nodes
281+
"""
282+
leaf_nodes = []
283+
284+
for dirpath, dirnames, filenames in os.walk('.'):
285+
# Will now skip any path that is not defined as a test category
286+
# or ends with unit or integration -> no tests in those folders were
287+
# created
288+
if dirpath[2:].split('/')[0] in test_categories and dirpath.split('/')[-1] not in ['unit', 'integration']:
289+
if len(dirpath[2:].split('/')) <= 3 and not dirnames:
290+
leaf_nodes.append(dirpath[2:])
291+
292+
return leaf_nodes
293+
294+
214295
def main():
215-
global test_count
296+
# Find leaf nodes
297+
leaves = find_leaf_nodes()
298+
299+
# Populate test objects
300+
all_tests = [ Test(path) for path in leaves ]
301+
302+
# Figure out which tests are to be run
303+
test_categories_to_run = []
304+
test_types_to_run = []
305+
if args.tests:
306+
for argument in args.tests:
307+
if argument in test_categories and argument not in args.skip:
308+
test_categories_to_run.append(argument)
309+
elif argument in test_types and argument not in args.skip:
310+
test_types_to_run.append(argument)
311+
else:
312+
print 'Test specified is not recognised, exiting'
313+
sys.exit(1)
314+
else:
315+
test_types_to_run = test_types
216316

217-
# Warned about skipped tests
218-
# @note : doesn't warn if you use '-t a b c ...' to run specific tests
219-
if args.skip:
220-
for skip in args.skip:
221-
print_skipped(skip, "marked skipped on command line")
222317

318+
if test_categories_to_run:
319+
# This means that a specific category has been requested
320+
specific_tests = [ test for test in all_tests if test.category_ in test_categories_to_run ]
223321

224-
test_categories = ["integration", "examples", "unit", "stress"]
225-
if "integration" not in args.tests:
226-
test_folders = ["fs", "hw", "kernel", "net", "platform", "stl", "util"]
227-
else:
228-
test_folders = []
229-
tests_combined = test_categories + test_folders
230-
if args.tests:
231-
test_categories = [x for x in tests_combined if x in args.tests ]
232-
if args.skip:
233-
test_categories = [x for x in tests_combined if not x in args.skip]
322+
# Print which tests are skipped
323+
print_skipped(specific_tests)
234324

325+
# Run the tests
326+
integration = integration_tests(specific_tests)
327+
else:
328+
# Print which tests are skipped
329+
print_skipped(all_tests)
235330

236-
integration = integration_tests() if "integration" in test_categories else 0
237-
stress = stress_test() if "stress" in test_categories else 0
238-
unit = unit_tests() if "unit" in test_categories else 0
239-
examples = examples_working() if "examples" in test_categories else 0
240-
folders = integration_tests(subfolder=test_categories[0]) if test_categories[0] in test_folders else 0
331+
# Run the tests
332+
integration = integration_tests(all_tests) if "integration" in test_types_to_run else 0
241333

242-
status = max(integration, stress, unit, examples, folders)
334+
stress = stress_test() if "stress" in test_types_to_run else 0
335+
unit = unit_tests() if "unit" in test_types_to_run else 0
336+
examples = examples_working() if "examples" in test_types_to_run else 0
243337

244-
if (not test_count):
245-
print "No tests selected"
246-
exit(0)
338+
status = max(integration, stress, unit, examples)
339+
if (status == 0):
340+
print pretty.SUCCESS(str(test_count - status) + " / " + str(test_count)
341+
+ " tests passed, exiting with code 0")
342+
else:
343+
print pretty.FAIL(str(status) + " / " + str(test_count) + " tests failed ")
247344

248-
if (status == 0):
249-
print pretty.SUCCESS(str(test_count - status) + " / " + str(test_count)
250-
+ " tests passed, exiting with code 0")
251-
else:
252-
print pretty.FAIL(str(status) + " / " + str(test_count) + " tests failed ")
345+
sys.exit(status)
253346

254-
sys.exit(status)
255347

256348
if __name__ == '__main__':
257349
main()

0 commit comments

Comments
 (0)