|
6 | 6 | class TestEvaluationFunctions(TestBase): |
7 | 7 | _multiprocess_can_split_ = True |
8 | 8 |
|
| 9 | + def _check_list_evaluation_setups(self, size, **kwargs): |
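|  | + """Compare list_evaluations_setups against list_evaluations for the |
|  | + given filter kwargs: non-empty output, descending sort, preserved |
|  | + run order, and matching hyper-parameter contents.""" |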
| 10 | + evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy", |
| 11 | + **kwargs, size=size, |
| 12 | + sort_order='desc', |
| 13 | + output_format='dataframe') |
| 14 | + evals = openml.evaluations.list_evaluations("predictive_accuracy", |
| 15 | + **kwargs, size=size, |
| 16 | + sort_order='desc', |
| 17 | + output_format='dataframe') |
| 18 | + |
| 19 | + # Check if list is non-empty |
| 20 | + self.assertGreater(len(evals_setups), 0) |
| 21 | + # Check that the output is sorted in descending order |
| 22 | + self.assertSequenceEqual(sorted(evals_setups['value'].tolist(), reverse=True), |
| 23 | + evals_setups['value'].tolist()) |
| 24 | + |
| 25 | + # Check that the run_id order of list_evaluations is preserved |
| 26 | + self.assertSequenceEqual(evals_setups['run_id'].tolist(), evals['run_id'].tolist()) |
| 27 | + # Check that the hyper-parameter column matches each run's parameter settings |
| 28 | + for index, row in evals_setups.iterrows(): |
| 29 | + params = openml.runs.get_run(row['run_id']).parameter_settings |
| 30 | + hyper_params = [(param['oml:name'], param['oml:value']) for param in params] |
| 31 | + self.assertEqual(sorted(row['parameters']), sorted(hyper_params)) |
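|  | + # Check that every row matches the requested filters; assumes each |
|  | + # filter kwarg (e.g. flow, task) maps to a '<name>_id' dataframe column |
|  | + for col, ids in kwargs.items(): |
|  | + self.assertTrue(evals_setups[col + '_id'].isin(ids).all()) |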
| 32 | + |
| 33 | + |
9 | 34 | def test_evaluation_list_filter_task(self): |
10 | 35 | openml.config.server = self.production_server |
11 | 36 |
|
@@ -145,54 +170,12 @@ def test_list_evaluation_measures(self): |
145 | 170 |
|
146 | 171 | def test_list_evaluations_setups_filter_flow(self): |
147 | 172 | openml.config.server = self.production_server |
148 | | - flow_id = 405 |
| 173 | + flow_id = [405] |
149 | 174 | size = 100 |
150 | | - evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy", |
151 | | - flow=[flow_id], size=size, |
152 | | - sort_order='desc', |
153 | | - output_format='dataframe') |
154 | | - evals = openml.evaluations.list_evaluations("predictive_accuracy", |
155 | | - flow=[flow_id], size=size, |
156 | | - sort_order='desc', |
157 | | - output_format='dataframe') |
158 | | - |
159 | | - # Check if list is non-empty |
160 | | - self.assertGreater(len(evals_setups), 0) |
161 | | - # Check if output from sort is sorted in the right order |
162 | | - self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True) |
163 | | - == list(evals_setups['value'].values)) |
164 | | - # Check if output and order of list_evaluations is preserved |
165 | | - self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all()) |
166 | | - # Check if the hyper-parameter column is as accurate and flow_id |
167 | | - for index, row in evals_setups.iterrows(): |
168 | | - params = openml.runs.get_run(row['run_id']).parameter_settings |
169 | | - hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params] |
170 | | - self.assertTrue(sorted(row['parameters']) == sorted(hyper_params)) |
171 | | - self.assertEqual(row['flow_id'], flow_id) |
| 175 | + self._check_list_evaluation_setups(size, flow=flow_id) |
172 | 176 |
|
173 | 177 | def test_list_evaluations_setups_filter_task(self): |
174 | 178 | openml.config.server = self.production_server |
175 | | - task_id = 6 |
| 179 | + task_id = [6] |
176 | 180 | size = 100 |
177 | | - evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy", |
178 | | - task=[task_id], size=size, |
179 | | - sort_order='desc', |
180 | | - output_format='dataframe') |
181 | | - evals = openml.evaluations.list_evaluations("predictive_accuracy", |
182 | | - task=[task_id], size=size, |
183 | | - sort_order='desc', |
184 | | - output_format='dataframe') |
185 | | - |
186 | | - # Check if list is non-empty |
187 | | - self.assertGreater(len(evals_setups), 0) |
188 | | - # Check if output from sort is sorted in the right order |
189 | | - self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True) |
190 | | - == list(evals_setups['value'].values)) |
191 | | - # Check if output and order of list_evaluations is preserved |
192 | | - self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all()) |
193 | | - # Check if the hyper-parameter column is as accurate and task_id |
194 | | - for index, row in evals_setups.iterrows(): |
195 | | - params = openml.runs.get_run(row['run_id']).parameter_settings |
196 | | - hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params] |
197 | | - self.assertTrue(sorted(row['parameters']) == sorted(hyper_params)) |
198 | | - self.assertEqual(row['task_id'], task_id) |
| 181 | + self._check_list_evaluation_setups(size, task=task_id) |