Skip to content

Commit 9c16386

Browse files
committed
Fix rounding error when resizing, add example for classifying standalone image
1 parent eb247d0 commit 9c16386

5 files changed

Lines changed: 109 additions & 33 deletions

File tree

.gitignore

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -2,3 +2,4 @@ __pycache__
22
dist
33
edge_impulse_linux.egg-info
44
build
5+
*.jpg

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ Then you can start classifying realtime sensor data. We have examples for:
5656
5757
* [Audio](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/audio/classify.py) - grabs data from the microphone and classifies it in realtime.
5858
* [Camera](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify.py) - grabs data from a webcam and classifies it in realtime.
59+
* [Still image](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-image.py) - classifies a still image from your hard drive.
5960
* [Custom data](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/custom/classify.py) - classifies custom sensor data.
6061
6162
## Troubleshooting

edge_impulse_linux/image.py

Lines changed: 37 additions & 32 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
import numpy as np
44
import cv2
55
from edge_impulse_linux.runner import ImpulseRunner
6-
import time
6+
import math
77
import psutil
88

99
class ImageImpulseRunner(ImpulseRunner):
@@ -17,7 +17,7 @@ def __init__(self, model_path: str):
1717

1818
def init(self):
1919
if psutil.OSX or psutil.MACOS:
20-
print('Make sure that video devices access is granted for your application. runnin')
20+
print('Make sure that video devices access is granted for your application.')
2121
print('If your video device is not responding, try running "tccutil reset Camera" to reset the camera access privileges')
2222

2323
model_info = super(ImageImpulseRunner, self).init()
@@ -49,46 +49,51 @@ def classifier(self, videoDeviceId = 0):
4949
while not self.closed:
5050
success, img = self.videoCapture.read()
5151
if success:
52-
features = []
52+
features, cropped = self.get_features_from_image(img)
5353

54-
EI_CLASSIFIER_INPUT_WIDTH = self.dim[0]
55-
EI_CLASSIFIER_INPUT_HEIGHT = self.dim[1]
54+
res = self.classify(features)
55+
yield res, cropped
5656

57-
in_frame_cols = img.shape[1]
58-
in_frame_rows = img.shape[0]
57+
def get_features_from_image(self, img):
58+
features = []
5959

60-
factor_w = EI_CLASSIFIER_INPUT_WIDTH / in_frame_cols
61-
factor_h = EI_CLASSIFIER_INPUT_HEIGHT / in_frame_rows
60+
EI_CLASSIFIER_INPUT_WIDTH = self.dim[0]
61+
EI_CLASSIFIER_INPUT_HEIGHT = self.dim[1]
6262

63-
largest_factor = factor_w if factor_w > factor_h else factor_h
63+
in_frame_cols = img.shape[1]
64+
in_frame_rows = img.shape[0]
6465

65-
resize_size_w = int(largest_factor * in_frame_cols)
66-
resize_size_h = int(largest_factor * in_frame_rows)
67-
resize_size = (resize_size_w, resize_size_h)
66+
factor_w = EI_CLASSIFIER_INPUT_WIDTH / in_frame_cols
67+
factor_h = EI_CLASSIFIER_INPUT_HEIGHT / in_frame_rows
6868

69-
resized = cv2.resize(img, resize_size, interpolation = cv2.INTER_AREA)
69+
largest_factor = factor_w if factor_w > factor_h else factor_h
7070

71-
crop_x = int((resize_size_w - resize_size_h) / 2) if resize_size_w > resize_size_h else 0
72-
crop_y = int((resize_size_h - resize_size_w) / 2) if resize_size_h > resize_size_w else 0
71+
resize_size_w = int(math.ceil(largest_factor * in_frame_cols))
72+
resize_size_h = int(math.ceil(largest_factor * in_frame_rows))
73+
resize_size = (resize_size_w, resize_size_h)
7374

74-
crop_region = (crop_x, crop_y, EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT)
75+
resized = cv2.resize(img, resize_size, interpolation = cv2.INTER_AREA)
7576

76-
cropped = resized[crop_region[1]:crop_region[1]+crop_region[3], crop_region[0]:crop_region[0]+crop_region[2]]
77+
crop_x = int((resize_size_w - resize_size_h) / 2) if resize_size_w > resize_size_h else 0
78+
crop_y = int((resize_size_h - resize_size_w) / 2) if resize_size_h > resize_size_w else 0
7779

78-
if self.isGrayscale:
79-
cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
80-
pixels = np.array(cropped).flatten().tolist()
80+
crop_region = (crop_x, crop_y, EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT)
8181

82-
for p in pixels:
83-
features.append((p << 16) + (p << 8) + p)
84-
else:
85-
pixels = np.array(cropped).flatten().tolist()
82+
cropped = resized[crop_region[1]:crop_region[1]+crop_region[3], crop_region[0]:crop_region[0]+crop_region[2]]
8683

87-
for ix in range(0, len(pixels), 3):
88-
b = pixels[ix + 0]
89-
g = pixels[ix + 1]
90-
r = pixels[ix + 2]
91-
features.append((r << 16) + (g << 8) + b)
84+
if self.isGrayscale:
85+
cropped = cv2.cvtColor(cropped, cv2.COLOR_BGR2GRAY)
86+
pixels = np.array(cropped).flatten().tolist()
9287

93-
res = self.classify(features)
94-
yield res, cropped
88+
for p in pixels:
89+
features.append((p << 16) + (p << 8) + p)
90+
else:
91+
pixels = np.array(cropped).flatten().tolist()
92+
93+
for ix in range(0, len(pixels), 3):
94+
b = pixels[ix + 0]
95+
g = pixels[ix + 1]
96+
r = pixels[ix + 2]
97+
features.append((r << 16) + (g << 8) + b)
98+
99+
return features, cropped

examples/image/classify-image.py

Lines changed: 69 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,69 @@
1+
#!/usr/bin/env python
2+
3+
import cv2
4+
import os
5+
import sys, getopt
6+
import numpy as np
7+
from edge_impulse_linux.image import ImageImpulseRunner
8+
9+
runner = None
10+
11+
def help():
12+
print('python classify-image.py <path_to_model.eim> <path_to_image.jpg>')
13+
14+
def main(argv):
15+
try:
16+
opts, args = getopt.getopt(argv, "h", ["--help"])
17+
except getopt.GetoptError:
18+
help()
19+
sys.exit(2)
20+
21+
for opt, arg in opts:
22+
if opt in ('-h', '--help'):
23+
help()
24+
sys.exit()
25+
26+
if len(args) != 2:
27+
help()
28+
sys.exit(2)
29+
30+
model = args[0]
31+
32+
dir_path = os.path.dirname(os.path.realpath(__file__))
33+
modelfile = os.path.join(dir_path, model)
34+
35+
print('MODEL: ' + modelfile)
36+
37+
with ImageImpulseRunner(modelfile) as runner:
38+
try:
39+
model_info = runner.init()
40+
print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
41+
labels = model_info['model_parameters']['labels']
42+
43+
img = cv2.imread(args[1])
44+
45+
features, cropped = runner.get_features_from_image(img)
46+
47+
# the image will be resized and cropped, save a copy of the picture here
48+
# so you can see what's being passed into the classifier
49+
cv2.imwrite('debug.jpg', cropped)
50+
51+
res = runner.classify(features)
52+
53+
if "classification" in res["result"].keys():
54+
print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
55+
for label in labels:
56+
score = res['result']['classification'][label]
57+
print('%s: %.2f\t' % (label, score), end='')
58+
print('', flush=True)
59+
60+
elif "bounding_boxes" in res["result"].keys():
61+
print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
62+
for bb in res["result"]["bounding_boxes"]:
63+
print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
64+
finally:
65+
if (runner):
66+
runner.stop()
67+
68+
if __name__ == "__main__":
69+
main(sys.argv[1:])

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[metadata]
22
name = edge_impulse_linux
3-
version = 1.0.2
3+
version = 1.0.3
44
author = EdgeImpulse Inc.
55
author_email = hello@edgeimpulse.com
66
description = Python runner for real-time ML classification

0 commit comments

Comments (0)