Skip to content

Commit ab98226

Browse files
committed
initial commit
1 parent 7bd35d4 commit ab98226

4 files changed

Lines changed: 66 additions & 0 deletions

File tree

openml/evaluations/__init__.py

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,2 @@
1+
from .evaluation import OpenMLEvaluation
2+
from .functions import list_evaluations

openml/evaluations/evaluation.py

Lines changed: 16 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,16 @@
1+
2+
class OpenMLEvaluation(object):
    """A single evaluation of a run on the OpenML server.

    Parameters
    ----------
    run_id : int
        OpenML id of the run the evaluation belongs to.
    task_id : int
        OpenML id of the task the run was executed on.
    setup_id : int
        OpenML id of the setup (flow + hyperparameters) used.
    flow_id : int
        OpenML id of the flow used.
    flow_name : str
        Human-readable name of the flow.
    data_name : str
        Name of the dataset the task is defined on.
    function : str
        Name of the evaluation measure (e.g. ``predictive_accuracy``).
    upload_time : str
        Timestamp (server-provided string) at which the run was uploaded.
    value : float
        Scalar value of the evaluation measure.
    array_data : str, optional
        Per-class / per-fold values when the measure produces an array
        (``None`` otherwise).
    """

    def __init__(self, run_id, task_id, setup_id, flow_id, flow_name,
                 data_name, function, upload_time, value, array_data):
        self.run_id = run_id
        self.task_id = task_id
        self.setup_id = setup_id
        self.flow_id = flow_id
        self.flow_name = flow_name
        self.data_name = data_name
        self.function = function
        self.upload_time = upload_time
        self.value = value
        self.array_data = array_data

    def __repr__(self):
        # Compact debug representation; keeps only the identifying fields.
        return ('%s(run_id=%r, task_id=%r, function=%r, value=%r)'
                % (type(self).__name__, self.run_id, self.task_id,
                   self.function, self.value))
16+

openml/evaluations/functions.py

Lines changed: 34 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,34 @@
1+
import xmltodict
2+
3+
from .._api_calls import _perform_api_call
4+
from ..evaluations import OpenMLEvaluation
5+
6+
def list_evaluations(function, task_id):
    """List all run evaluations of measure ``function`` on a given task.

    Queries the OpenML API endpoint ``evaluation/list`` and parses the
    returned XML into :class:`OpenMLEvaluation` objects.

    Parameters
    ----------
    function : str
        Name of the evaluation measure (e.g. ``predictive_accuracy``).
    task_id : int
        OpenML id of the task.

    Returns
    -------
    dict
        Mapping from run id to :class:`OpenMLEvaluation`.

    Raises
    ------
    ValueError
        If the returned XML does not contain an ``oml:evaluations`` root.
    TypeError
        If the ``oml:evaluation`` element is neither a list nor a dict.
    """
    # BUG FIX: the URL previously read "funtion", which the server
    # does not recognize as the measure filter.
    xml_string = _perform_api_call(
        "evaluation/list/function/%s/task_id/%d" % (function, task_id))

    evals_dict = xmltodict.parse(xml_string)
    # Minimalistic check if the XML is useful
    if 'oml:evaluations' not in evals_dict:
        raise ValueError('Error in return XML, does not contain '
                         '"oml:evaluations": %s' % str(evals_dict))

    # xmltodict collapses a single <oml:evaluation> element into a dict
    # instead of a one-element list; normalize to a list either way.
    # BUG FIX: the dict branch previously indexed the unrelated
    # 'oml:runs'/'oml:run' keys.
    evaluations = evals_dict['oml:evaluations']['oml:evaluation']
    if isinstance(evaluations, list):
        evals_list = evaluations
    elif isinstance(evaluations, dict):
        evals_list = [evaluations]
    else:
        raise TypeError('Unexpected type for oml:evaluation: %s'
                        % type(evaluations))

    evals = dict()
    for eval_ in evals_list:
        run_id = int(eval_['oml:run_id'])
        # BUG FIX: task id key is namespaced ('oml:task_id'), like every
        # other field in the evaluation element.
        evaluation = OpenMLEvaluation(
            run_id, int(eval_['oml:task_id']),
            int(eval_['oml:setup_id']), int(eval_['oml:flow_id']),
            eval_['oml:flow_name'], eval_['oml:data_name'],
            eval_['oml:function'], eval_['oml:upload_time'],
            float(eval_['oml:value']),
            # NOTE(review): 'oml:array_data' may be absent for scalar
            # measures — kept as direct indexing to preserve the original
            # contract; confirm against the API schema.
            eval_['oml:array_data'])
        evals[run_id] = evaluation
    # BUG FIX: previously returned only the last OpenMLEvaluation
    # (and raised NameError for an empty list) instead of the dict.
    return evals
34+
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
import unittest
2+
import openml
3+
import openml.evaluations
4+
from openml.testing import TestBase
5+
6+
class TestEvaluationFunctions(TestBase):
    """Integration test for ``openml.evaluations.list_evaluations``."""

    def test_evaluation_list(self):
        # Evaluations are only populated on the production server, so
        # switch away from the (default) test server for this check.
        openml.config.server = self.production_server

        evaluations = openml.evaluations.list_evaluations(
            "predictive_accuracy", 59)

        # Task 59 has well over a hundred uploaded runs; a strict count
        # would be brittle, so only assert a lower bound.
        self.assertGreater(len(evaluations), 100)
14+

0 commit comments

Comments
 (0)