From fb1208c16ca9af7f01ce8abfbf9a502b06a36865 Mon Sep 17 00:00:00 2001
From: Raul James
Date: Thu, 29 Feb 2024 13:59:42 +0100
Subject: [PATCH] step through video frame_interval at a time

---
 examples/image/classify-video.py | 58 ++++++++++++++++++++++++--------
 1 file changed, 44 insertions(+), 14 deletions(-)

diff --git a/examples/image/classify-video.py b/examples/image/classify-video.py
index cdea9b5..649a944 100644
--- a/examples/image/classify-video.py
+++ b/examples/image/classify-video.py
@@ -12,12 +12,13 @@
 
 runner = None
 # if you don't want to see a video preview, set this to False
 show_camera = True
+
 if (sys.platform == 'linux' and not os.environ.get('DISPLAY')):
     show_camera = False
 
 def help():
-    print('python classify-video.py <path_to_model.eim> <path_to_video.mp4>')
+    print('python classify-video.py <path_to_model.eim> <path_to_video.mp4> [frame_interval]')
 
 def main(argv):
     try:
@@ -31,53 +32,80 @@ def main(argv):
             help()
             sys.exit()
 
-    if len(args) != 2:
+    if len(args) < 2 or len(args) > 3:
         help()
         sys.exit(2)
 
     model = args[0]
+    video_file = args[1]
+
 
     dir_path = os.path.dirname(os.path.realpath(__file__))
     modelfile = os.path.join(dir_path, model)
 
-    print('MODEL: ' + modelfile)
+    # jump 1 frame at a time by default
+    frame_interval = 1
+
+    if len(args) == 3:
+        try:
+            frame_interval = int(args[2])
+        except ValueError:
+            frame_interval = 1
+    print('Stepping through %d frames at a time' % frame_interval)
+
+
 
 
     with ImageImpulseRunner(modelfile) as runner:
         try:
             model_info = runner.init()
             print('Loaded runner for "' + model_info['project']['owner'] + ' / ' + model_info['project']['name'] + '"')
             labels = model_info['model_parameters']['labels']
 
-            vidcap = cv2.VideoCapture(args[1])
-            sec = 0
+            vidcap = cv2.VideoCapture(video_file)
+            #fps = vidcap.get(cv2.CAP_PROP_FPS)
+            #total_frames = vidcap.get(cv2.CAP_PROP_FRAME_COUNT)
+            #total_duration = total_frames / fps
+            #print("File information: FPS: %.2f Frames %d, duration: %.2f" % (fps, total_frames, total_duration))
+
+            frame_nr = 0
 
             start_time = time.time()
 
-            def getFrame(sec):
-                vidcap.set(cv2.CAP_PROP_POS_MSEC,sec*1000)
+            def getFrame(id):
+                #debug_time = time.time()
+                #print("frame id: %d" % id)
+                vidcap.set(cv2.CAP_PROP_POS_FRAMES,id)
+
                 hasFrames,image = vidcap.read()
                 if hasFrames:
+                    #print("get frame took: %.2f sec" % (time.time() - debug_time))
                     return image
                 else:
-                    print('Failed to load frame', args[1])
+                    print('Failed to load frame', video_file)
                     exit(1)
 
-            img = getFrame(sec)
+
+            img = getFrame(frame_nr)
 
             while img.size != 0:
                 # imread returns images in BGR format, so we need to convert to RGB
+                #debug_time = time.time()
                 img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
+                #print("color convert took: %.2f sec" % (time.time() - debug_time))
 
                 # get_features_from_image also takes a crop direction arguments in case you don't have square images
+                #debug_time = time.time()
                 features, cropped = runner.get_features_from_image(img)
+                #print("get features took: %.2f sec" % (time.time() - debug_time))
 
                 # the image will be resized and cropped, save a copy of the picture here
                 # so you can see what's being passed into the classifier
-                cv2.imwrite('debug.jpg', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
+                #cv2.imwrite('debug.jpg', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
 
                 res = runner.classify(features)
 
+                #debug_time = time.time()
                 if "classification" in res["result"].keys():
                     print('Result (%d ms.) ' % (res['timing']['dsp'] + res['timing']['classification']), end='')
                     for label in labels:
@@ -90,16 +118,18 @@ def getFrame(sec):
                     for bb in res["result"]["bounding_boxes"]:
                         print('\t%s (%.2f): x=%d y=%d w=%d h=%d' % (bb['label'], bb['value'], bb['x'], bb['y'], bb['width'], bb['height']))
                         img = cv2.rectangle(cropped, (bb['x'], bb['y']), (bb['x'] + bb['width'], bb['y'] + bb['height']), (255, 0, 0), 1)
+                #print("process results took: %.2f sec" % (time.time() - debug_time))
 
                 if (show_camera):
+                    #debug_time = time.time()
                     cv2.imshow('edgeimpulse', cv2.cvtColor(cropped, cv2.COLOR_RGB2BGR))
                     if cv2.waitKey(1) == ord('q'):
                         break
+                    #print("show results took: %.2f sec" % (time.time() - debug_time))
+
 
-                sec = time.time() - start_time
-                sec = round(sec, 2)
-                print("Getting frame at: %.2f sec" % sec)
-                img = getFrame(sec)
+                frame_nr += frame_interval
+                img = getFrame(frame_nr)
 
         finally:
             if (runner):
                 runner.stop()