Skip to content

Commit cb5acca

Browse files
Merge pull request #4 from BMW-InnovationLab/dev
Dev
2 parents a28815d + 0d1bf6b commit cb5acca

11 files changed

Lines changed: 281 additions & 331 deletions

README.md

Lines changed: 10 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ sudo docker build --build-arg http_proxy='' --build-arg https_proxy='' -t tensor
5656

5757
## Run the docker container
5858

59-
To run the API, go to the project's root directory and run the following:
59+
To run the API, go to the API's directory and run the following:
6060

6161
#### Using Linux based docker:
6262

@@ -135,13 +135,17 @@ Returns the specified model's configuration
135135

136136
Performs inference on specified model and a list of images, and returns bounding boxes
137137

138+
**P.S: Custom endpoints like /load, /detect, and /get_labels should be used in a chronological order. First you have to call /load, and then call /detect or /get_labels**
139+
138140
## Model structure
139141

140142
The folder "models" contains subfolders of all the models to be loaded.
141143
Inside each subfolder there should be a:
142144

143-
- pb file: contains the model weights
144-
- pbtxt file: contains model classes
145+
- pb file (frozen_inference_graph.pb): contains the model weights
146+
147+
- pbtxt file (object-detection.pbtxt): contains model classes
148+
145149
- Config.json (This is a json file containing information about the model)
146150

147151
```json
@@ -156,10 +160,9 @@ Inside each subfolder there should be a:
156160
}
157161
```
158162
P.S:
159-
- "number_of_classes" value should be equal to your model's number of classes
160-
- You can change "confidence" and "predictions" values while running the API
161-
- The API will return bounding boxes with a confidence higher than the "confidence" value. A high "confidence" can show you only accurate predictions. "confidence" value should be between 0 and 100
162-
- The "predictions" value specifies the maximum number of bounding boxes in the API response. It should be positive
163+
- You can change confidence and predictions values while running the API
164+
- The API will return bounding boxes with a confidence higher than the "confidence" value. A high "confidence" can show you only accurate predictions
165+
- The "predictions" value specifies the maximum number of bounding boxes in the API response
163166

164167

165168
## Benchmarking
@@ -229,10 +232,3 @@ Inside each subfolder there should be a:
229232

230233
## Acknowledgment
231234

232-
[inmind.ai](https://inmind.ai)
233-
234-
[robotron.de](https://robotron.de)
235-
236-
Joe Sleiman, inmind.ai, Beirut, Lebanon
237-
238-
Antoine Charbel, inmind.ai, Beirut, Lebanon

src/main/deep_learning_service.py

100644100755
Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -1,7 +1,7 @@
11
import os
2+
import re
23
import json
34
import uuid
4-
import re
55
from inference.inference_engines_factory import InferenceEngineFactory
66
from inference.exceptions import ModelNotFound, InvalidModelConfiguration, ModelNotLoaded, InferenceEngineNotFound, \
77
InvalidInputData, ApplicationError
@@ -75,14 +75,14 @@ def load_models(self, model_names):
7575
for model in model_names:
7676
self.load_model(model)
7777

78-
async def run_model(self, model_name, input_data, draw_boxes, predict_batch):
78+
async def run_model(self, model_name, input_data, draw, predict_batch):
7979
"""
8080
Loads the model in case it was never loaded and calls the inference engine class to get a prediction.
8181
:param model_name: Model name
8282
:param input_data: Batch of images or a single image
83-
:param draw_boxes: Boolean to specify if we need to draw the response on the input image
83+
:param draw: Boolean to specify if we need to draw the response on the input image
8484
:param predict_batch: Boolean to specify if there is a batch of images in a request or not
85-
:return: Model response in case draw_boxes was set to False, else an actual image
85+
:return: Model response in case draw was set to False, else an actual image
8686
"""
8787
if re.match(r'[0-9a-fA-F]{8}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{4}\-[0-9a-fA-F]{12}', model_name,
8888
flags=0):
@@ -92,18 +92,18 @@ async def run_model(self, model_name, input_data, draw_boxes, predict_batch):
9292
if self.model_loaded(model_name):
9393
try:
9494
if predict_batch:
95-
return await self.models_dict[model_name].run_batch(input_data, draw_boxes, predict_batch)
95+
return await self.models_dict[model_name].run_batch(input_data, draw, predict_batch)
9696
else:
97-
if not draw_boxes:
98-
return await self.models_dict[model_name].run(input_data, draw_boxes, predict_batch)
97+
if not draw:
98+
return await self.models_dict[model_name].infer(input_data, draw, predict_batch)
9999
else:
100-
await self.models_dict[model_name].run(input_data, draw_boxes, predict_batch)
100+
await self.models_dict[model_name].infer(input_data, draw, predict_batch)
101101
except ApplicationError as e:
102102
raise e
103103
else:
104104
try:
105105
self.load_model(model_name)
106-
return await self.run_model(model_name, input_data, draw_boxes, predict_batch)
106+
return await self.run_model(model_name, input_data, draw, predict_batch)
107107
except ApplicationError as e:
108108
raise e
109109

src/main/inference/__init__.py

100644100755
File mode changed.

src/main/inference/base_error.py

100644100755
Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -12,7 +12,7 @@ def __init__(self):
1212
"""
1313
self.logger = logging.getLogger('logger')
1414
date = datetime.now().strftime('%Y-%m-%d')
15-
file_path = 'logs/tensorflow_inference_engine_' + date + '.log'
15+
file_path = 'logs/' + date + '.log'
1616
self.handler = logging.FileHandler(file_path)
1717
self.handler.setLevel(logging.INFO)
1818
self.handler.setFormatter(logging.Formatter("%(levelname)s;%(asctime)s;%(message)s"))

src/main/inference/base_inference_engine.py

100644100755
Lines changed: 79 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -4,89 +4,90 @@
44

55
class AbstractInferenceEngine(ABC):
66

7-
def __init__(self, model_path):
8-
"""
9-
Takes a model path and calls the load function.
10-
:param model_path: The model's path
11-
:return:
12-
"""
13-
self.labels = []
14-
self.configuration = {}
15-
self.model_path = model_path
16-
try:
17-
self.validate_configuration()
18-
except ApplicationError as e:
19-
raise e
20-
try:
21-
self.load()
22-
except ApplicationError as e:
23-
raise e
24-
except Exception as e:
25-
raise ModelNotLoaded()
7+
def __init__(self, model_path):
8+
"""
9+
Takes a model path and calls the load function.
10+
:param model_path: The model's path
11+
:return:
12+
"""
13+
self.labels = []
14+
self.configuration = {}
15+
self.model_path = model_path
16+
try:
17+
self.validate_configuration()
18+
except ApplicationError as e:
19+
raise e
20+
try:
21+
self.load()
22+
except ApplicationError as e:
23+
raise e
24+
except Exception as e:
25+
print(e)
26+
raise ModelNotLoaded()
2627

27-
@abstractmethod
28-
def load(self):
29-
"""
30-
Loads the model based on the underlying implementation.
31-
"""
32-
pass
28+
@abstractmethod
29+
def load(self):
30+
"""
31+
Loads the model based on the underlying implementation.
32+
"""
33+
pass
3334

34-
@abstractmethod
35-
def free(self):
36-
"""
37-
Performs any manual memory management required when unloading a model.
38-
Will be called when the class's destructor is called.
39-
"""
40-
pass
35+
@abstractmethod
36+
def free(self):
37+
"""
38+
Performs any manual memory management required when unloading a model.
39+
Will be called when the class's destructor is called.
40+
"""
41+
pass
4142

42-
@abstractmethod
43-
async def run(self, input_data, draw_boxes, predict_batch):
44-
"""
45-
Performs the required inference based on the underlying implementation of this class.
46-
Could be used to return classification predictions, object detection coordinates...
47-
:param predict_batch: Boolean
48-
:param input_data: A single image
49-
:param draw_boxes: Used to draw bounding boxes on image instead of returning them
50-
:return: A bounding-box
51-
"""
52-
pass
43+
@abstractmethod
44+
async def infer(self, input_data, draw, predict_batch):
45+
"""
46+
Performs the required inference based on the underlying implementation of this class.
47+
Could be used to return classification predictions, object detection coordinates...
48+
:param predict_batch: Boolean
49+
:param input_data: A single image
50+
:param draw: Used to draw bounding boxes on image instead of returning them
51+
:return: A bounding-box
52+
"""
53+
pass
5354

54-
@abstractmethod
55-
async def run_batch(self, input_data, draw_boxes, predict_batch):
56-
"""
57-
Iterates over images and returns a prediction for each one.
58-
:param predict_batch: Boolean
59-
:param input_data: List of images
60-
:param draw_boxes: Used to draw bounding boxes on image instead of returning them
61-
:return: List of bounding-boxes
62-
"""
63-
pass
55+
@abstractmethod
56+
async def run_batch(self, input_data, draw, predict_batch):
57+
"""
58+
Iterates over images and returns a prediction for each one.
59+
:param predict_batch: Boolean
60+
:param input_data: List of images
61+
:param draw: Used to draw bounding boxes on image instead of returning them
62+
:return: List of bounding-boxes
63+
"""
64+
pass
6465

65-
@abstractmethod
66-
def validate_configuration(self):
67-
"""
68-
Validates that the model and its files are valid based on the underlying implementation's requirements.
69-
Can check for configuration values, folder structure...
70-
"""
71-
pass
66+
@abstractmethod
67+
def validate_configuration(self):
68+
"""
69+
Validates that the model and its files are valid based on the underlying implementation's requirements.
70+
Can check for configuration values, folder structure...
71+
"""
72+
pass
7273

73-
@abstractmethod
74-
def set_configuration(self, data):
75-
"""
76-
Takes the configuration from the config.json file
77-
:param data: Json data
78-
:return:
79-
"""
80-
pass
74+
@abstractmethod
75+
def set_model_configuration(self, data):
76+
"""
77+
Takes the configuration from the config.json file
78+
:param data: Json data
79+
:return:
80+
"""
81+
pass
8182

82-
@abstractmethod
83-
def validate_json_configuration(self, data):
84-
"""
85-
Validates the configuration of the config.json file.
86-
:param data: Json data
87-
:return:
88-
"""
89-
pass
83+
@abstractmethod
84+
def validate_json_configuration(self, data):
85+
"""
86+
Validates the configuration of the config.json file.
87+
:param data: Json data
88+
:return:
89+
"""
90+
pass
9091

91-
def __del__(self):
92-
self.free()
92+
def __del__(self):
93+
self.free()

src/main/inference/errors.py

100644100755
Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
import os
22
import logging
3-
from datetime import datetime, date
3+
from datetime import datetime
44
from inference.base_error import AbstractError
55

66

@@ -31,15 +31,15 @@ def check_date(self):
3131
:return:
3232
"""
3333
self.date = datetime.now().strftime('%Y-%m-%d')
34-
file_path = 'tensorflow_inference_engine_' + self.date + '.log'
34+
file_path = self.date + '.log'
3535
if file_path not in os.listdir('logs'):
3636
self.logger.removeHandler(self.handler)
3737
self.handler = logging.FileHandler('logs/' + file_path)
3838
self.handler.setLevel(logging.INFO)
3939
self.handler.setFormatter(logging.Formatter("%(levelname)s;%(asctime)s;%(message)s"))
4040
self.logger.addHandler(self.handler)
4141
oldest_log_file = os.listdir('logs')[0]
42-
oldest_date = oldest_log_file.split("_")[3].split('.')[0]
42+
oldest_date = oldest_log_file.split('.')[0]
4343
a = datetime.strptime(datetime.now().strftime('%Y-%m-%d'), '%Y-%m-%d')
4444
b = datetime.strptime(oldest_date, '%Y-%m-%d')
4545
delta = a - b

src/main/inference/exceptions.py

100644100755
File mode changed.

src/main/inference/inference_engines_factory.py

100644100755
Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -27,6 +27,8 @@ def get_engine(path_to_model):
2727
# model instance
2828
return getattr(__import__(inference_engine_name), 'InferenceEngine')(path_to_model)
2929
except ApplicationError as e:
30+
print(e)
3031
raise e
3132
except Exception as e:
33+
print(e)
3234
raise InferenceEngineNotFound(inference_engine_name)

0 commit comments

Comments
 (0)