Skip to content

Commit 272b208

Browse files
committed
flake8 warnings
1 parent 1d28529 commit 272b208

3 files changed

Lines changed: 23 additions & 19 deletions

File tree

openml/evaluations/__init__.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from .evaluation import OpenMLEvaluation
22
from .functions import list_evaluations, list_evaluation_measures, list_evaluations_setups
33

4-
__all__ = ['OpenMLEvaluation', 'list_evaluations', 'list_evaluation_measures', 'list_evaluations_setups']
4+
__all__ = ['OpenMLEvaluation', 'list_evaluations', 'list_evaluation_measures',
5+
'list_evaluations_setups']

openml/evaluations/functions.py

Lines changed: 9 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -298,28 +298,30 @@ def list_evaluations_setups(
298298
dict or dataframe
299299
"""
300300
# List evaluations
301-
evals = list_evaluations(function=function, offset=offset, size=size, id=id, task=task, setup=setup, flow=flow,
302-
uploader=uploader, tag=tag, per_fold=per_fold, sort_order=sort_order,
303-
output_format='dataframe')
301+
evals = list_evaluations(function=function, offset=offset, size=size, id=id, task=task,
302+
setup=setup, flow=flow, uploader=uploader, tag=tag,
303+
per_fold=per_fold, sort_order=sort_order, output_format='dataframe')
304304

305305
# List setups
306306
# Split setups in evals into chunks of N setups as list_setups does not support long lists
307307
N = 100
308-
setup_chunks = np.split(evals['setup_id'].unique(), ((len(evals['setup_id'].unique()) - 1) // N) + 1)
308+
setup_chunks = np.split(evals['setup_id'].unique(),
309+
((len(evals['setup_id'].unique()) - 1) // N) + 1)
309310
setups = pd.DataFrame()
310311
for setup in setup_chunks:
311-
result = openml.setups.list_setups(setup=list(setup), output_format='dataframe')
312+
result = pd.DataFrame(openml.setups.list_setups(setup=setup, output_format='dataframe'))
312313
result.drop('flow_id', axis=1, inplace=True)
313314
setups = pd.concat([setups, result], ignore_index=True)
314315
parameters = []
315316
for parameter_dict in setups['parameters']:
316317
if parameter_dict is not None:
317-
parameters.append([tuple([param['parameter_name'], param['value']]) for param in parameter_dict.values()])
318+
parameters.append([tuple([param['parameter_name'], param['value']])
319+
for param in parameter_dict.values()])
318320
else:
319321
parameters.append([])
320322
setups['parameters'] = parameters
321323
# Merge setups with evaluations
322-
df = evals.merge(setups, on='setup_id', how='left')
324+
df = pd.DataFrame(evals.merge(setups, on='setup_id', how='left'))
323325
if output_format == 'dataframe':
324326
return df
325327
else:

tests/test_evaluations/test_evaluation_functions.py

Lines changed: 12 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -148,11 +148,13 @@ def test_list_evaluations_setups_filter_flow(self):
148148
flow_id = 405
149149
size = 10
150150
evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
151-
flow=[flow_id], size=size,
152-
sort_order='desc', output_format='dataframe')
151+
flow=[flow_id], size=size,
152+
sort_order='desc',
153+
output_format='dataframe')
153154
evals = openml.evaluations.list_evaluations("predictive_accuracy",
154-
flow=[flow_id], size=size,
155-
sort_order='desc', output_format='dataframe')
155+
flow=[flow_id], size=size,
156+
sort_order='desc',
157+
output_format='dataframe')
156158

157159
# Check if list is non-empty
158160
self.assertGreater(len(evals_setups), 0)
@@ -170,11 +172,13 @@ def test_list_evaluations_setups_filter_task(self):
170172
task_id = 6
171173
size = 20
172174
evals_setups = openml.evaluations.list_evaluations_setups("predictive_accuracy",
173-
task=[task_id], size=size,
174-
sort_order='desc', output_format='dataframe')
175+
task=[task_id], size=size,
176+
sort_order='desc',
177+
output_format='dataframe')
175178
evals = openml.evaluations.list_evaluations("predictive_accuracy",
176-
task=[task_id], size=size,
177-
sort_order='desc', output_format='dataframe')
179+
task=[task_id], size=size,
180+
sort_order='desc',
181+
output_format='dataframe')
178182

179183
# Check if list is non-empty
180184
self.assertGreater(len(evals_setups), 0)
@@ -186,6 +190,3 @@ def test_list_evaluations_setups_filter_task(self):
186190
hyper_params = [tuple([param['oml:name'], param['oml:value']]) for param in params]
187191
self.assertTrue((row['parameters'] == hyper_params))
188192
self.assertEqual(row['task_id'], task_id)
189-
190-
191-

0 commit comments

Comments (0)