Skip to content

Commit 359dd7a

Browse files
committed
Add full frame example
1 parent 9c16386 commit 359dd7a

5 files changed

Lines changed: 159 additions & 4 deletions

File tree

README.md

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,7 @@ Then you can start classifying realtime sensor data. We have examples for:
5656
5757
* [Audio](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/audio/classify.py) - grabs data from the microphone and classifies it in realtime.
5858
* [Camera](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify.py) - grabs data from a webcam and classifies it in realtime.
59+
* [Camera (full frame)](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-full-frame.py) - grabs data from a webcam and classifies it twice (once cut from the left, once cut from the right). This is useful if you have a wide-angle lens and don't want to miss any events.
5960
* [Still image](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-image.py) - classifies a still image from your hard drive.
6061
* [Custom data](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/custom/classify.py) - classifies custom sensor data.
6162

edge_impulse_linux/image.py

Lines changed: 25 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -44,6 +44,13 @@ def __exit__(self, type, value, traceback):
4444
def classify(self, data):
4545
return super(ImageImpulseRunner, self).classify(data)
4646

47+
def get_frames(self, videoDeviceId = 0):
    """Yield raw frames from the given video device until the runner is closed.

    :param videoDeviceId: OpenCV capture device index to open (default 0).
    :returns: generator of frames as returned by ``cv2.VideoCapture.read()``.
    """
    self.videoCapture = cv2.VideoCapture(videoDeviceId)
    try:
        while not self.closed:
            success, img = self.videoCapture.read()
            # Skip frames that failed to read instead of yielding garbage.
            if success:
                yield img
    finally:
        # Release the camera even when the consumer abandons the generator,
        # otherwise the device stays locked until process exit.
        self.videoCapture.release()
4754
def classifier(self, videoDeviceId = 0):
4855
self.videoCapture = cv2.VideoCapture(videoDeviceId)
4956
while not self.closed:
@@ -54,7 +61,7 @@ def classifier(self, videoDeviceId = 0):
5461
res = self.classify(features)
5562
yield res, cropped
5663

57-
def get_features_from_image(self, img):
64+
def get_features_from_image(self, img, crop_direction_x='center', crop_direction_y='center'):
5865
features = []
5966

6067
EI_CLASSIFIER_INPUT_WIDTH = self.dim[0]
@@ -74,8 +81,23 @@ def get_features_from_image(self, img):
7481

7582
resized = cv2.resize(img, resize_size, interpolation = cv2.INTER_AREA)
7683

77-
crop_x = int((resize_size_w - resize_size_h) / 2) if resize_size_w > resize_size_h else 0
78-
crop_y = int((resize_size_h - resize_size_w) / 2) if resize_size_h > resize_size_w else 0
84+
if (crop_direction_x == 'center'):
85+
crop_x = int((resize_size_w - resize_size_h) / 2) if resize_size_w > resize_size_h else 0
86+
elif (crop_direction_x == 'left'):
87+
crop_x = 0
88+
elif (crop_direction_x == 'right'):
89+
crop_x = resize_size_w - EI_CLASSIFIER_INPUT_WIDTH
90+
else:
91+
raise Exception('Invalid value for crop_direction_x, should be center, left or right')
92+
93+
if (crop_direction_y == 'center'):
94+
crop_y = int((resize_size_h - resize_size_w) / 2) if resize_size_h > resize_size_w else 0
95+
elif (crop_direction_y == 'top'):
96+
crop_y = 0
97+
elif (crop_direction_y == 'bottom'):
98+
crop_y = resize_size_h - EI_CLASSIFIER_INPUT_HEIGHT
99+
else:
100+
raise Exception('Invalid value for crop_direction_y, should be center, top or bottom')
79101

80102
crop_region = (crop_x, crop_y, EI_CLASSIFIER_INPUT_WIDTH, EI_CLASSIFIER_INPUT_HEIGHT)
81103

Lines changed: 131 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,131 @@
1+
#!/usr/bin/env python
2+
3+
import cv2
4+
import os
5+
import sys, getopt
6+
import signal
7+
import time
8+
from edge_impulse_linux.image import ImageImpulseRunner
9+
10+
runner = None
11+
show_camera = False
12+
13+
def now():
    """Return the current wall-clock time as an integer number of milliseconds."""
    milliseconds = time.time() * 1000
    return round(milliseconds)
15+
16+
def get_webcams():
    """Probe video ports 0-4 and return the IDs where a readable camera answers."""
    found_ports = []
    for candidate in range(5):
        print("Looking for a camera in port %s:" %candidate)
        capture = cv2.VideoCapture(candidate)
        if capture.isOpened():
            ok = capture.read()[0]
            if ok:
                backend = capture.getBackendName()
                width = capture.get(3)
                height = capture.get(4)
                print("Camera %s (%s x %s) found in port %s " %(backend,height,width, candidate))
                found_ports.append(candidate)
            capture.release()
    return found_ports
31+
32+
def sigint_handler(sig, frame):
    """Ctrl+C handler: stop the active runner (if any) and exit cleanly."""
    print('Interrupted')
    if runner:
        runner.stop()
    sys.exit(0)

# Install the handler so an interactive Ctrl+C shuts the runner down.
signal.signal(signal.SIGINT, sigint_handler)
39+
40+
def help():
    print('python classify.py <path_to_model.eim> <Camera port ID, only required when more than 1 camera is present>')

def main(argv):
    """Run the full-frame webcam example.

    Grabs frames from a webcam and classifies each frame twice — once cropped
    from the left edge, once from the right — so a wide-angle view is fully
    covered by a square model input.

    :param argv: command-line arguments: [model_path, optional camera port id].
    Exits with status 2 on bad usage.
    """
    # Bind the module-level `runner` so sigint_handler() can stop it on Ctrl+C;
    # without this, `with ... as runner` would create a local that the signal
    # handler never sees.
    global runner
    try:
        # Long option names are declared WITHOUT the leading dashes —
        # ["--help"] would make getopt only recognize '----help'.
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) == 0:
        help()
        sys.exit(2)

    model = args[0]

    # Resolve the model path relative to this script's directory.
    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']

            # Pick a camera: explicit port from argv, otherwise auto-detect.
            if len(args) >= 2:
                videoCaptureDeviceId = int(args[1])
            else:
                port_ids = get_webcams()
                if len(port_ids) == 0:
                    raise Exception('Cannot find any webcams')
                if len(args) <= 1 and len(port_ids) > 1:
                    raise Exception("Multiple cameras found. Add the camera port ID as a second argument to use to this script")
                videoCaptureDeviceId = int(port_ids[0])

            # Sanity-check the selected camera before streaming from it.
            camera = cv2.VideoCapture(videoCaptureDeviceId)
            ret = camera.read()[0]
            if ret:
                backendName = camera.getBackendName()
                w = camera.get(3)
                h = camera.get(4)
                print("Camera %s (%s x %s) in port %s selected." %(backendName,h,w, videoCaptureDeviceId))
                camera.release()
            else:
                raise Exception("Couldn't initialize selected camera.")

            next_frame = 0 # limit to ~10 fps here

            for img in runner.get_frames(videoCaptureDeviceId):
                if (next_frame > now()):
                    time.sleep((next_frame - now()) / 1000)

                # make two cuts from the image, one on the left and one on the right
                features_l, cropped_l = runner.get_features_from_image(img, 'left')
                features_r, cropped_r = runner.get_features_from_image(img, 'right')

                # classify both
                res_l = runner.classify(features_l)
                res_r = runner.classify(features_r)

                # Save the crops so you can inspect what the model actually saw.
                cv2.imwrite('debug_l.jpg', cropped_l)
                cv2.imwrite('debug_r.jpg', cropped_r)

                def print_classification(res, tag):
                    # Handles both classification and object-detection results.
                    if "classification" in res["result"].keys():
                        print('%s: Result (%d ms.) ' % (tag, res['timing']['dsp'] + res['timing']['classification']), end='')
                        for label in labels:
                            score = res['result']['classification'][label]
                            print('%s: %.2f\t' % (label, score), end='')
                        print('', flush=True)
                    elif "bounding_boxes" in res["result"].keys():
                        print('%s: Found %d bounding boxes (%d ms.)' % (tag, len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                        for bb in res["result"]["bounding_boxes"]:
                            print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))

                print_classification(res_l, 'LEFT')
                print_classification(res_r, 'RIGHT')

                next_frame = now() + 100
        finally:
            if (runner):
                runner.stop()

if __name__ == "__main__":
    main(sys.argv[1:])

examples/image/classify-image.py

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -42,6 +42,7 @@ def main(argv):
4242

4343
img = cv2.imread(args[1])
4444

45+
# get_features_from_image also takes crop direction arguments (crop_direction_x, crop_direction_y) in case you don't have square images
4546
features, cropped = runner.get_features_from_image(img)
4647

4748
# the image will be resized and cropped, save a copy of the picture here

setup.cfg

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,6 @@
11
[metadata]
22
name = edge_impulse_linux
3-
version = 1.0.3
3+
version = 1.0.4
44
author = EdgeImpulse Inc.
55
author_email = hello@edgeimpulse.com
66
description = Python runner for real-time ML classification

0 commit comments

Comments
 (0)