|
18 | 18 |
|
19 | 19 | from .datasets import OpenMLDataset, OpenMLDataFeature |
20 | 20 | from . import datasets |
| 21 | +from . import tasks |
21 | 22 | from . import runs |
22 | 23 | from . import flows |
23 | 24 | from .runs import OpenMLRun |
24 | 25 | from .tasks import OpenMLTask, OpenMLSplit |
25 | 26 | from .flows import OpenMLFlow |
26 | 27 |
|
# Package version string; bumped to 0.3.0 with the addition of the
# `tasks` submodule export and the populate_cache helper.
__version__ = "0.3.0"
| 29 | + |
| 30 | + |
def populate_cache(task_ids=None, dataset_ids=None, flow_ids=None,
                   run_ids=None):
    """
    Populate a cache for offline and parallel usage of the OpenML connector.

    Each requested object is fetched once through the connector, which
    stores it in the local cache as a side effect of retrieval.

    Parameters
    ----------
    task_ids : iterable

    dataset_ids : iterable

    flow_ids : iterable

    run_ids : iterable

    Returns
    -------
    None
    """
    # Pair every id collection with the connector function that fetches
    # (and thereby caches) that kind of object. `None` means "skip".
    fetch_plan = (
        (task_ids, tasks.functions.get_task),
        (dataset_ids, datasets.functions.get_dataset),
        (flow_ids, flows.functions.get_flow),
        (run_ids, runs.functions.get_run),
    )
    for object_ids, fetch in fetch_plan:
        if object_ids is None:
            continue
        for object_id in object_ids:
            fetch(object_id)
27 | 65 |
|
28 | | -__version__ = "0.2.1" |
29 | 66 |
|
# Public names re-exported by the package; `tasks` is listed alongside the
# other submodules (`datasets`, `runs`, `flows`, `config`).
__all__ = ['OpenMLDataset', 'OpenMLDataFeature', 'OpenMLRun',
           'OpenMLSplit', 'datasets', 'OpenMLTask', 'OpenMLFlow',
           'config', 'runs', 'flows', 'tasks']
0 commit comments