
Commit 202a388: Added classify-video example (#10)
1 parent: 629f256

2 files changed: 110 additions & 1 deletion

README.md (2 additions & 1 deletion)
```diff
@@ -56,8 +56,9 @@ Then you can start classifying realtime sensor data. We have examples for:
 * [Audio](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/audio/classify.py) - grabs data from the microphone and classifies it in realtime.
 * [Camera](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify.py) - grabs data from a webcam and classifies it in realtime.
-* [Camera (full frame)](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify.py) - grabs data from a webcam and classifies it twice (once cut from the left, once cut from the right). This is useful if you have a wide-angle lens and don't want to miss any events.
+* [Camera (full frame)](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-full-frame.py) - grabs data from a webcam and classifies it twice (once cut from the left, once cut from the right). This is useful if you have a wide-angle lens and don't want to miss any events.
 * [Still image](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-image.py) - classifies a still image from your hard drive.
+* [Video](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/image/classify-video.py) - grabs frames from a video file on your hard drive and classifies them.
 * [Custom data](https://github.com/edgeimpulse/linux-sdk-python/blob/master/examples/custom/classify.py) - classifies custom sensor data.

 ## Troubleshooting
```
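As a usage note (this invocation comes from the script's own `help()` text below), the new example takes a model file and a video file as positional arguments: `python classify-video.py <path_to_model.eim> <path_to_video.mp4>`.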

examples/image/classify-video.py (new file, 108 additions & 0 deletions)
```python
#!/usr/bin/env python

import device_patches  # Device specific patches for Jetson Nano (needs to be before importing cv2)

import cv2
import os
import time
import sys, getopt
import numpy as np
from edge_impulse_linux.image import ImageImpulseRunner

runner = None
# if you don't want to see a video preview, set this to False
show_camera = True
if (sys.platform == 'linux' and not os.environ.get('DISPLAY')):
    show_camera = False

def help():
    print('python classify-video.py <path_to_model.eim> <path_to_video.mp4>')

def main(argv):
    try:
        # long options are listed without the leading dashes
        opts, args = getopt.getopt(argv, "h", ["help"])
    except getopt.GetoptError:
        help()
        sys.exit(2)

    for opt, arg in opts:
        if opt in ('-h', '--help'):
            help()
            sys.exit()

    if len(args) != 2:
        help()
        sys.exit(2)

    model = args[0]

    dir_path = os.path.dirname(os.path.realpath(__file__))
    modelfile = os.path.join(dir_path, model)

    print('MODEL: ' + modelfile)

    with ImageImpulseRunner(modelfile) as runner:
        try:
            model_info = runner.init()
            print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
            labels = model_info['model_parameters']['labels']

            vidcap = cv2.VideoCapture(args[1])
            sec = 0
            start_time = time.time()

            def getFrame(sec):
                vidcap.set(cv2.CAP_PROP_POS_MSEC, sec * 1000)
                hasFrames, image = vidcap.read()
                if hasFrames:
                    return image
                else:
                    # no frame at this position; this also triggers once the video ends
                    print('Failed to load frame', args[1])
                    exit(1)

            img = getFrame(sec)

            while img.size != 0:
                # imread returns images in BGR format, so we need to convert to RGB
                img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)

                # get_features_from_image also takes a crop direction argument in case you don't have square images
                features, cropped = runner.get_features_from_image(img)

                # the image will be resized and cropped, save a copy of the picture here
                # so you can see what's being passed into the classifier
                cv2.imwrite('debug.jpg', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))

                res = runner.classify(features)

                if "classification" in res["result"].keys():
                    print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                    for label in labels:
                        score = res['result']['classification'][label]
                        print('%s: %.2f\t' % (label, score), end='')
                    print('', flush=True)

                elif "bounding_boxes" in res["result"].keys():
                    print('Found %d bounding boxes (%d ms.)' % (len(res["result"]["bounding_boxes"]), res['timing']['dsp'] + res['timing']['classification']))
                    for bb in res["result"]["bounding_boxes"]:
                        print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                        img = cv2.rectangle(cropped, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)

                if (show_camera):
                    cv2.imshow('edgeimpulse', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
                    if cv2.waitKey(1) == ord('q'):
                        break

                # seek by wall-clock time, so the video plays at real-time speed and
                # frames the classifier can't keep up with are skipped
                sec = time.time() - start_time
                sec = round(sec, 2)
                print("Getting frame at: %.2f sec" % sec)
                img = getFrame(sec)
        finally:
            if (runner):
                runner.stop()

if __name__ == "__main__":
    main(sys.argv[1:])
```
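
The example above seeks by wall-clock time (`sec = time.time() - start_time`), so the video is processed at real-time speed and frames the classifier cannot keep up with are simply skipped. If you would rather classify every frame regardless of how long inference takes, a plain `vidcap.read()` loop works instead of seeking. Below is a minimal sketch under that assumption, reusing only calls that appear in the example; `modelfile.eim` and `video.mp4` are placeholder paths.

```python
# Hypothetical variant: classify every frame in decode order instead of
# seeking by wall-clock time. Placeholder paths, same runner API as above.
import cv2
from edge_impulse_linux.image import ImageImpulseRunner

with ImageImpulseRunner('modelfile.eim') as runner:  # placeholder model path
    runner.init()
    vidcap = cv2.VideoCapture('video.mp4')           # placeholder video path
    while True:
        has_frame, frame = vidcap.read()             # next frame, in order
        if not has_frame:
            break                                    # end of video
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        features, cropped = runner.get_features_from_image(frame)
        res = runner.classify(features)
        print(res['result'])
    vidcap.release()
```

The trade-off is throughput: this variant never drops frames, but on slow hardware it will fall behind the video's real-time rate, which is why the committed example seeks by elapsed time instead.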
