Skip to content

Commit 0e1a75c

Browse files
committed
preserve dict row order
1 parent e803565 commit 0e1a75c

2 files changed

Lines changed: 10 additions & 4 deletions

File tree

openml/evaluations/functions.py

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -211,8 +211,8 @@ def __list_evaluations(api_call, output_format='object'):
211211
'array_data': array_data}
212212

213213
if output_format == 'dataframe':
214-
data, index = list(evals.values()), list(evals.keys())
215-
evals = pd.DataFrame(data, index=index)
214+
rows = [value for key, value in evals.items()]
215+
evals = (pd.DataFrame.from_records(rows, columns=rows[0].keys()))
216216
return evals
217217

218218

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -146,7 +146,7 @@ def test_list_evaluation_measures(self):
146146
def test_list_evaluations_setups_filter_flow(self):
147147
openml.config.server = self.production_server
148148
flow_id = 405
149-
size = 10
149+
size = 100
150150
evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
151151
flow=[flow_id], size=size,
152152
sort_order='desc',
@@ -158,6 +158,9 @@ def test_list_evaluations_setups_filter_flow(self):
158158

159159
# Check if list is non-empty
160160
self.assertGreater(len(evals_setups), 0)
161+
# Check if output from sort is sorted in the right order
162+
self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True)
163+
== list(evals_setups['value'].values))
161164
# Check if output and order of list_evaluations is preserved
162165
self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all())
163166
# Check if the hyper-parameter column is as accurate and flow_id
@@ -170,7 +173,7 @@ def test_list_evaluations_setups_filter_flow(self):
170173
def test_list_evaluations_setups_filter_task(self):
171174
openml.config.server = self.production_server
172175
task_id = 6
173-
size = 20
176+
size = 100
174177
evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
175178
task=[task_id], size=size,
176179
sort_order='desc',
@@ -182,6 +185,9 @@ def test_list_evaluations_setups_filter_task(self):
182185

183186
# Check if list is non-empty
184187
self.assertGreater(len(evals_setups), 0)
188+
# Check if output from sort is sorted in the right order
189+
self.assertTrue(sorted(list(evals_setups['value'].values), reverse=True)
190+
== list(evals_setups['value'].values))
185191
# Check if output and order of list_evaluations is preserved
186192
self.assertTrue((evals_setups['run_id'].values == evals['run_id'].values).all())
187193
# Check if the hyper-parameter column is as accurate and task_id

0 commit comments

Comments (0)