|
1 | 1 | from collections import OrderedDict |
| 2 | +import hashlib |
2 | 3 | import logging |
3 | 4 | import os |
4 | 5 | import re |
@@ -816,6 +817,7 @@ def _create_task_cache_dir(self, task_id): |
816 | 817 | pass |
817 | 818 | return task_cache_dir |
818 | 819 |
|
| 820 | + def _perform_api_call(self, call, data=None, file_dictionary=None, add_authentication=True): |
819 | 821 | ############################################################################ |
820 | 822 | # Runs |
821 | 823 | def get_runs_list(self, task_id=None, flow_id=None, setup_id=None): |
@@ -1025,33 +1027,37 @@ def _read_url(self, url, add_authentication=False, data=None, filePath=None): |
1025 | 1027 | if not url.endswith("/"): |
1026 | 1028 | url += "/" |
1027 | 1029 | url += call |
1028 | | - return self._read_url(url, data=data, file_path=file_path) |
| 1030 | + return self._read_url(url, data=data, file_dictionary=file_dictionary) |
1029 | 1031 |
|
1030 | | - def _read_url(self, url, data=None, file_path=None): |
| 1032 | + def _read_url(self, url, data=None, file_dictionary=None): |
1031 | 1033 | if data is None: |
1032 | 1034 | data = {} |
1033 | 1035 | data['api_key'] = self.config.get('FAKE_SECTION', 'apikey') |
1034 | 1036 |
|
1035 | | - if file_path is not None: |
1036 | | - if os.path.isabs(file_path): |
1037 | | - try: |
1038 | | - decoder = arff.ArffDecoder() |
1039 | | - except: |
1040 | | - raise "The file you provided is not a valid arff file" |
1041 | | - |
1042 | | - fileElement={'dataset': open(file_path, 'rb')} |
1043 | | - data['description']= data.get('description') |
1044 | | - data.pop('dataset', None) |
1045 | | - |
1046 | | - try: |
1047 | | - response = requests.post(url, data=data, files=fileElement) |
| 1037 | + if file_dictionary is not None: |
| 1038 | + file_elements = {} |
| 1039 | + for key, path in file_dictionary.items(): |
| 1040 | + if os.path.isabs(path) and os.path.exists(path): |
| 1041 | + try: |
| 1042 | + if key is 'dataset': |
| 1043 | + decoder = arff.ArffDecoder() |
| 1044 | + with open(path) as fh: |
| 1045 | + decoder.decode(fh, encode_nominal=True) |
| 1046 | + except: |
| 1047 | + raise ValueError("The file you have provided is not a valid arff file") |
| 1048 | + |
| 1049 | + file_elements[key] = open(path, 'rb') |
1048 | 1050 | except URLError as error: |
1049 | 1051 | print(error) |
1050 | 1052 |
|
| 1053 | + else: |
| 1054 | + raise ValueError("File doesn't exist") |
| 1055 | + |
| 1056 | + response = requests.post(url, data=data, files=file_elements) |
1051 | 1057 | return response.status_code, response |
1052 | | - else: |
1053 | | - raise "File doesn't exists" |
1054 | 1058 |
|
| 1059 | + except URLError as error: |
| 1060 | + print(error) |
1055 | 1061 | else: |
1056 | 1062 | data = urlencode(data) |
1057 | 1063 | data = data.encode('utf-8') |
@@ -1096,38 +1102,41 @@ def _read_url(self, url, data=None, file_path=None): |
def upload_dataset(self, description, file_path=None):
    """Upload a dataset to the server.

    Parameters
    ----------
    description : str
        XML description of the dataset.
    file_path : str, optional
        Absolute path to the dataset arff file. If None, only the
        description is uploaded (no file part is attached).

    Returns
    -------
    return_code : int
        HTTP status code of the server response.
    dataset_xml
        The server response object.

    Raises
    ------
    URLError
        Re-raised after printing when the underlying API call fails.
    """
    data = {'description': description}
    # Only attach a file part when a path was actually supplied.
    # (Previously, a None file_path skipped the API call entirely and
    # the final return statement raised NameError on unbound locals.)
    if file_path is not None:
        file_dictionary = {'dataset': file_path}
    else:
        file_dictionary = None
    try:
        return_code, dataset_xml = self._perform_api_call(
            "/data/", data=data, file_dictionary=file_dictionary)
    except URLError as e:
        # TODO logger.debug
        print(e)
        raise e
    return return_code, dataset_xml
1107 | 1113 |
|
def upload_flow(self, description, file_path=None):
    """Upload a flow (implementation) to the server.

    Parameters
    ----------
    description : str
        XML description of the flow.
    file_path : str, optional
        Absolute path to the flow's source file. If None, only the
        description is uploaded.

    Returns
    -------
    return_code : int
        HTTP status code of the server response.
    dataset_xml
        The server response object.

    Raises
    ------
    URLError
        Re-raised after printing when the underlying API call fails.
    """
    data = {'description': description}
    # Guard against file_path=None: passing {'source': None} downstream
    # made _read_url call os.path.isabs(None), raising TypeError.
    if file_path is not None:
        file_dictionary = {'source': file_path}
    else:
        file_dictionary = None
    try:
        return_code, dataset_xml = self._perform_api_call(
            "/flow/", data=data, file_dictionary=file_dictionary)
    except URLError as e:
        # TODO logger.debug
        print(e)
        raise e
    return return_code, dataset_xml
1119 | 1124 |
|
def upload_run(self, files):
    """Upload run results to the server.

    Parameters
    ----------
    files : dict
        Mapping of form-field names to absolute file paths. Must contain
        the key 'predictions'.

    Returns
    -------
    return_code : int
        HTTP status code of the server response.
    dataset_xml
        The server response object.

    Raises
    ------
    ValueError
        If no 'predictions' entry is present in `files`.
    URLError
        Re-raised after printing when the underlying API call fails.
    """
    # Guard clause instead of wrapping the whole body in if/else.
    if 'predictions' not in files:
        raise ValueError("prediction files doesn't exist")
    # Shallow copy so later caller-side mutations don't leak into the
    # request; replaces the manual key-by-key copy loop.
    file_dictionary = dict(files)
    try:
        return_code, dataset_xml = self._perform_api_call(
            "/run/", file_dictionary=file_dictionary)
    except URLError as e:
        # TODO logger.debug
        print(e)
        raise e
    return return_code, dataset_xml
1127 | 1141 |
|
1128 | | - except URLError as e: |
1129 | | - # TODO logger.debug |
1130 | | - print(e) |
1131 | | - raise e |
1132 | | - return return_code, dataset_xml |
1133 | 1142 |
|
0 commit comments