Commit cabd377

review comments
1 parent a113ba4 commit cabd377

3 files changed: 41 additions & 67 deletions

openml/evaluations/functions.py

Lines changed: 12 additions & 16 deletions
@@ -268,27 +268,23 @@ def list_evaluations_setups(
         the number of runs to skip, starting from the first
     size : int, optional
         the maximum number of runs to show
-
-    id : list, optional
-
-    task : list, optional
-
-    setup: list, optional
-
-    flow : list, optional
-
-    uploader : list, optional
-
+    id : list[int], optional
+        the list of evaluation ID's
+    task : list[int], optional
+        the list of task ID's
+    setup: list[int], optional
+        the list of setup ID's
+    flow : list[int], optional
+        the list of flow ID's
+    uploader : list[int], optional
+        the list of uploader ID's
     tag : str, optional
-
+        filter evaluation based on given tag
     per_fold : bool, optional
-
     sort_order : str, optional
         order of sorting evaluations, ascending ("asc") or descending ("desc")
-
-    output_format: str, optional (default='object')
+    output_format: str, optional (default='dataframe')
         The parameter decides the format of the output.
-
         - If 'dict' the output is a dict of dict
         - If 'dataframe' the output is a pandas DataFrame
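
As a quick orientation, the revised parameter documentation maps onto a call like the following. This is a minimal sketch, assuming a reachable OpenML server; the measure, flow ID 405, and size are illustrative values borrowed from the tests changed below.

```python
import openml

# Sketch of list_evaluations_setups with the newly documented
# list[int] filters; flow=[405] and size=10 are illustrative.
evals_setups = openml.evaluations.list_evaluations_setups(
    "predictive_accuracy",      # evaluation measure to list
    flow=[405],                 # flow : list[int], the list of flow ID's
    size=10,                    # the maximum number of runs to show
    sort_order="desc",          # descending by evaluation value
    output_format="dataframe",  # pandas DataFrame output
)
# Columns used by the tests in this commit: run_id, value, parameters.
print(evals_setups[["run_id", "value", "parameters"]].head())
```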

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 29 additions & 46 deletions
@@ -6,6 +6,31 @@
 class TestEvaluationFunctions(TestBase):
     _multiprocess_can_split_ = True
 
+    def _check_list_evaluation_setups(self, size, **kwargs):
+        evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
+                                                                  **kwargs, size=size,
+                                                                  sort_order='desc',
+                                                                  output_format='dataframe')
+        evals = openml.evaluations.list_evaluations("predictive_accuracy",
+                                                    **kwargs, size=size,
+                                                    sort_order='desc',
+                                                    output_format='dataframe')
+
+        # Check if list is non-empty
+        self.assertGreater(len(evals_setups), 0)
+        # Check if output from sort is sorted in the right order
+        self.assertSequenceEqual(sorted(evals_setups['value'].tolist(), reverse=True),
+                                 evals_setups['value'].tolist())
+
+        # Check if output and order of list_evaluations is preserved
+        self.assertSequenceEqual(evals_setups['run_id'].tolist(), evals['run_id'].tolist())
+        # Check if the hyper-parameter column matches each run's parameter settings
+        for index, row in evals_setups.iterrows():
+            params = openml.runs.get_run(row['run_id']).parameter_settings
+            hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params]
+            self.assertTrue(sorted(row['parameters']) == sorted(hyper_params))
+
+
     def test_evaluation_list_filter_task(self):
         openml.config.server = self.production_server
 
@@ -145,54 +170,12 @@ def test_list_evaluation_measures(self):
 
     def test_list_evaluations_setups_filter_flow(self):
         openml.config.server = self.production_server
-        flow_id = 405
+        flow_id = [405]
         size = 100
-        evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
-                                                                  flow=[flow_id], size=size,
-                                                                  sort_order='desc',
-                                                                  output_format='dataframe')
-        evals = openml.evaluations.list_evaluations("predictive_accuracy",
-                                                    flow=[flow_id], size=size,
-                                                    sort_order='desc',
-                                                    output_format='dataframe')
-
-        # Check if list is non-empty
-        self.assertGreater(len(evals_setups), 0)
-        # Check if output from sort is sorted in the right order
-        self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True)
-                        == list(evals_setups['value'].values))
-        # Check if output and order of list_evaluations is preserved
-        self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all())
-        # Check if the hyper-parameter column is as accurate and flow_id
-        for index, row in evals_setups.iterrows():
-            params = openml.runs.get_run(row['run_id']).parameter_settings
-            hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params]
-            self.assertTrue(sorted(row['parameters']) == sorted(hyper_params))
-            self.assertEqual(row['flow_id'], flow_id)
+        self._check_list_evaluation_setups(size, flow=flow_id)
 
     def test_list_evaluations_setups_filter_task(self):
         openml.config.server = self.production_server
-        task_id = 6
+        task_id = [6]
         size = 100
-        evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
-                                                                  task=[task_id], size=size,
-                                                                  sort_order='desc',
-                                                                  output_format='dataframe')
-        evals = openml.evaluations.list_evaluations("predictive_accuracy",
-                                                    task=[task_id], size=size,
-                                                    sort_order='desc',
-                                                    output_format='dataframe')
-
-        # Check if list is non-empty
-        self.assertGreater(len(evals_setups), 0)
-        # Check if output from sort is sorted in the right order
-        self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True)
-                        == list(evals_setups['value'].values))
-        # Check if output and order of list_evaluations is preserved
-        self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all())
-        # Check if the hyper-parameter column is as accurate and task_id
-        for index, row in evals_setups.iterrows():
-            params = openml.runs.get_run(row['run_id']).parameter_settings
-            hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params]
-            self.assertTrue(sorted(row['parameters']) == sorted(hyper_params))
-            self.assertEqual(row['task_id'], task_id)
+        self._check_list_evaluation_setups(size, task=task_id)
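
Read outside unittest, the helper's checks amount to the following standalone sketch (assuming a live production server; the flow ID and size are again illustrative):

```python
import openml

size = 10
setups = openml.evaluations.list_evaluations_setups(
    "predictive_accuracy", flow=[405], size=size,
    sort_order="desc", output_format="dataframe")
evals = openml.evaluations.list_evaluations(
    "predictive_accuracy", flow=[405], size=size,
    sort_order="desc", output_format="dataframe")

# The result is non-empty and sorted in descending order of 'value'.
assert len(setups) > 0
assert setups["value"].tolist() == sorted(setups["value"].tolist(), reverse=True)
# Row order is preserved relative to the plain list_evaluations call.
assert setups["run_id"].tolist() == evals["run_id"].tolist()
# Each row's 'parameters' agrees with the run's stored parameter settings.
for _, row in setups.iterrows():
    params = openml.runs.get_run(row["run_id"]).parameter_settings
    hyper_params = [(p["oml:name"], p["oml:value"]) for p in params]
    assert sorted(row["parameters"]) == sorted(hyper_params)
```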

tests/test_flows/test_flow_functions.py

Lines changed: 0 additions & 5 deletions
@@ -286,11 +286,6 @@ def test_get_flow_reinstantiate_model_no_extension(self):
     @unittest.skipIf(LooseVersion(sklearn.__version__) == "0.19.1",
                      reason="Target flow is from sklearn 0.19.1")
     def test_get_flow_reinstantiate_model_wrong_version(self):
-        openml.config.server = self.production_server
-        # 20 is scikit-learn ==0.20.0
-        # I can't find a != 0.20 permanent flow on the test server.
-        self.assertRaises(ValueError, openml.flows.get_flow, flow_id=7238, reinstantiate=True)
-
         # Note that CI does not test against 0.19.1.
         openml.config.server = self.production_server
         _, sklearn_major, _ = LooseVersion(sklearn.__version__).version[:3]
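
For reference, the deleted assertion exercised the version-mismatch path directly; a standalone sketch of that behaviour looks like this. The server URL is the assumed production endpoint, and flow 7238 (a scikit-learn 0.20.0 flow, per the removed comment) comes from the deleted test code.

```python
import openml

# Reinstantiating a flow built against a different scikit-learn
# version is expected to raise ValueError.
openml.config.server = "https://www.openml.org/api/v1/xml"  # assumed production URL
try:
    openml.flows.get_flow(flow_id=7238, reinstantiate=True)
except ValueError as err:
    print("Reinstantiation failed as expected:", err)
```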
