Commit da04c4b

add video streaming samples (GoogleCloudPlatform#2047)
* add video streaming samples
* address review comments
* flake
* flake
* flake

1 parent 0acc792 commit da04c4b

File tree: 3 files changed, +438 -3 lines
‎video/cloud-client/analyze/beta_snippets.py

357 additions, 1 deletion
@@ -20,9 +20,22 @@
 Usage Examples:
     python beta_snippets.py transcription \
     gs://python-docs-samples-tests/video/googlework_short.mp4
+
     python beta_snippets.py video-text-gcs \
     gs://python-docs-samples-tests/video/googlework_short.mp4
-    python beta_snippets.py track-objects /resources/cat.mp4
+
+    python beta_snippets.py track-objects resources/cat.mp4
+
+    python beta_snippets.py streaming-labels resources/cat.mp4
+
+    python beta_snippets.py streaming-shot-change resources/cat.mp4
+
+    python beta_snippets.py streaming-objects resources/cat.mp4
+
+    python beta_snippets.py streaming-explicit-content resources/cat.mp4
+
+    python beta_snippets.py streaming-annotation-storage resources/cat.mp4 \
+    gs://mybucket/myfolder
 """

 import argparse
@@ -274,6 +287,316 @@ def track_objects(path):
     return object_annotations


+def detect_labels_streaming(path):
+    # [START video_streaming_label_detection_beta]
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    # Set streaming config.
+    config = videointelligence.types.StreamingVideoConfig(
+        feature=(videointelligence.enums.
+                 StreamingFeature.STREAMING_LABEL_DETECTION))
+
+    # config_request should be the first in the stream of requests.
+    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+        video_config=config)
+
+    # Set the chunk size to 5MB (recommended less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    # Load file content.
+    stream = []
+    with io.open(path, 'rb') as video_file:
+        while True:
+            data = video_file.read(chunk_size)
+            if not data:
+                break
+            stream.append(data)
+
+    def stream_generator():
+        yield config_request
+        for chunk in stream:
+            yield videointelligence.types.StreamingAnnotateVideoRequest(
+                input_content=chunk)
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    responses = client.streaming_annotate_video(requests)
+
+    # Each response corresponds to about 1 second of video.
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        # Get the time offset of the response.
+        frame = response.annotation_results.label_annotations[0].frames[0]
+        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        print('{}s:'.format(time_offset))
+
+        for annotation in response.annotation_results.label_annotations:
+            description = annotation.entity.description
+            # Every annotation has only one frame
+            confidence = annotation.frames[0].confidence
+            print('\t{} (confidence: {})'.format(description, confidence))
+    # [END video_streaming_label_detection_beta]
+
+
+def detect_shot_change_streaming(path):
+    # [START video_streaming_shot_change_detection_beta]
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    # Set streaming config.
+    config = videointelligence.types.StreamingVideoConfig(
+        feature=(videointelligence.enums.StreamingFeature.
+                 STREAMING_SHOT_CHANGE_DETECTION))
+
+    # config_request should be the first in the stream of requests.
+    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+        video_config=config)
+
+    # Set the chunk size to 5MB (recommended less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    # Load file content.
+    stream = []
+    with io.open(path, 'rb') as video_file:
+        while True:
+            data = video_file.read(chunk_size)
+            if not data:
+                break
+            stream.append(data)
+
+    def stream_generator():
+        yield config_request
+        for chunk in stream:
+            yield videointelligence.types.StreamingAnnotateVideoRequest(
+                input_content=chunk)
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    responses = client.streaming_annotate_video(requests)
+
+    # Each response corresponds to about 1 second of video.
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        for annotation in response.annotation_results.shot_annotations:
+            start = (annotation.start_time_offset.seconds +
+                     annotation.start_time_offset.nanos / 1e9)
+            end = (annotation.end_time_offset.seconds +
+                   annotation.end_time_offset.nanos / 1e9)
+
+            print('Shot: {}s to {}s'.format(start, end))
+    # [END video_streaming_shot_change_detection_beta]
+
+
+def track_objects_streaming(path):
+    # [START video_streaming_object_tracking_beta]
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    # Set streaming config.
+    config = videointelligence.types.StreamingVideoConfig(
+        feature=(videointelligence.enums.
+                 StreamingFeature.STREAMING_OBJECT_TRACKING))
+
+    # config_request should be the first in the stream of requests.
+    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+        video_config=config)
+
+    # Set the chunk size to 5MB (recommended less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    # Load file content.
+    stream = []
+    with io.open(path, 'rb') as video_file:
+        while True:
+            data = video_file.read(chunk_size)
+            if not data:
+                break
+            stream.append(data)
+
+    def stream_generator():
+        yield config_request
+        for chunk in stream:
+            yield videointelligence.types.StreamingAnnotateVideoRequest(
+                input_content=chunk)
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    responses = client.streaming_annotate_video(requests)
+
+    # Each response corresponds to about 1 second of video.
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        # Get the time offset of the response.
+        frame = response.annotation_results.object_annotations[0].frames[0]
+        time_offset = frame.time_offset.seconds + frame.time_offset.nanos / 1e9
+        print('{}s:'.format(time_offset))
+
+        for annotation in response.annotation_results.object_annotations:
+            description = annotation.entity.description
+            confidence = annotation.confidence
+
+            # track_id tracks the same object in the video.
+            track_id = annotation.track_id
+
+            print('\tEntity description: {}'.format(description))
+            print('\tTrack Id: {}'.format(track_id))
+            if annotation.entity.entity_id:
+                print('\tEntity id: {}'.format(annotation.entity.entity_id))
+
+            print('\tConfidence: {}'.format(confidence))
+
+            # Every annotation has only one frame
+            frame = annotation.frames[0]
+            box = frame.normalized_bounding_box
+            print('\tBounding box position:')
+            print('\tleft  : {}'.format(box.left))
+            print('\ttop   : {}'.format(box.top))
+            print('\tright : {}'.format(box.right))
+            print('\tbottom: {}\n'.format(box.bottom))
+    # [END video_streaming_object_tracking_beta]
+
+
+def detect_explicit_content_streaming(path):
+    # [START video_streaming_explicit_content_detection_beta]
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    # Set streaming config.
+    config = videointelligence.types.StreamingVideoConfig(
+        feature=(videointelligence.enums.StreamingFeature.
+                 STREAMING_EXPLICIT_CONTENT_DETECTION))
+
+    # config_request should be the first in the stream of requests.
+    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+        video_config=config)
+
+    # Set the chunk size to 5MB (recommended less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    # Load file content.
+    stream = []
+    with io.open(path, 'rb') as video_file:
+        while True:
+            data = video_file.read(chunk_size)
+            if not data:
+                break
+            stream.append(data)
+
+    def stream_generator():
+        yield config_request
+        for chunk in stream:
+            yield videointelligence.types.StreamingAnnotateVideoRequest(
+                input_content=chunk)
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    responses = client.streaming_annotate_video(requests)
+
+    # Each response corresponds to about 1 second of video.
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        for frame in response.annotation_results.explicit_annotation.frames:
+            time_offset = (frame.time_offset.seconds +
+                           frame.time_offset.nanos / 1e9)
+            pornography_likelihood = videointelligence.enums.Likelihood(
+                frame.pornography_likelihood)
+
+            print('Time: {}s'.format(time_offset))
+            print('\tpornography: {}'.format(pornography_likelihood.name))
+    # [END video_streaming_explicit_content_detection_beta]
+
+
+def annotation_to_storage_streaming(path, output_uri):
+    # [START video_streaming_annotation_to_storage_beta]
+    from google.cloud import videointelligence_v1p3beta1 as videointelligence
+
+    # path = 'path_to_file'
+    # output_uri = 'gs://path_to_output'
+
+    client = videointelligence.StreamingVideoIntelligenceServiceClient()
+
+    # Set streaming config specifying the output_uri.
+    # The output_uri is the prefix of the actual output files.
+    storage_config = videointelligence.types.StreamingStorageConfig(
+        enable_storage_annotation_result=True,
+        annotation_result_storage_directory=output_uri)
+    # Here we use label detection as an example.
+    # All features support output to GCS.
+    config = videointelligence.types.StreamingVideoConfig(
+        feature=(videointelligence.enums.
+                 StreamingFeature.STREAMING_LABEL_DETECTION),
+        storage_config=storage_config)
+
+    # config_request should be the first in the stream of requests.
+    config_request = videointelligence.types.StreamingAnnotateVideoRequest(
+        video_config=config)
+
+    # Set the chunk size to 5MB (recommended less than 10MB).
+    chunk_size = 5 * 1024 * 1024
+
+    # Load file content.
+    stream = []
+    with io.open(path, 'rb') as video_file:
+        while True:
+            data = video_file.read(chunk_size)
+            if not data:
+                break
+            stream.append(data)
+
+    def stream_generator():
+        yield config_request
+        for chunk in stream:
+            yield videointelligence.types.StreamingAnnotateVideoRequest(
+                input_content=chunk)
+
+    requests = stream_generator()
+
+    # streaming_annotate_video returns a generator.
+    responses = client.streaming_annotate_video(requests)
+
+    for response in responses:
+        # Check for errors.
+        if response.error.message:
+            print(response.error.message)
+            break
+
+        print('Storage URI: {}'.format(response.annotation_results_uri))
+    # [END video_streaming_annotation_to_storage_beta]
+
+
 if __name__ == '__main__':
     parser = argparse.ArgumentParser(
         description=__doc__,
@@ -300,6 +623,29 @@ def track_objects(path):
         'track-objects', help=track_objects.__doc__)
     video_object_tracking_parser.add_argument('path')

+    video_streaming_labels_parser = subparsers.add_parser(
+        'streaming-labels', help=detect_labels_streaming.__doc__)
+    video_streaming_labels_parser.add_argument('path')
+
+    video_streaming_shot_change_parser = subparsers.add_parser(
+        'streaming-shot-change', help=detect_shot_change_streaming.__doc__)
+    video_streaming_shot_change_parser.add_argument('path')
+
+    video_streaming_objects_parser = subparsers.add_parser(
+        'streaming-objects', help=track_objects_streaming.__doc__)
+    video_streaming_objects_parser.add_argument('path')
+
+    video_streaming_explicit_content_parser = subparsers.add_parser(
+        'streaming-explicit-content',
+        help=detect_explicit_content_streaming.__doc__)
+    video_streaming_explicit_content_parser.add_argument('path')
+
+    video_streaming_annotation_to_storage_parser = subparsers.add_parser(
+        'streaming-annotation-storage',
+        help=annotation_to_storage_streaming.__doc__)
+    video_streaming_annotation_to_storage_parser.add_argument('path')
+    video_streaming_annotation_to_storage_parser.add_argument('output_uri')
+
     args = parser.parse_args()

     if args.command == 'transcription':
@@ -312,3 +658,13 @@ def track_objects(path):
         track_objects_gcs(args.gcs_uri)
     elif args.command == 'track-objects':
         track_objects(args.path)
+    elif args.command == 'streaming-labels':
+        detect_labels_streaming(args.path)
+    elif args.command == 'streaming-shot-change':
+        detect_shot_change_streaming(args.path)
+    elif args.command == 'streaming-objects':
+        track_objects_streaming(args.path)
+    elif args.command == 'streaming-explicit-content':
+        detect_explicit_content_streaming(args.path)
+    elif args.command == 'streaming-annotation-storage':
+        annotation_to_storage_streaming(args.path, args.output_uri)
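
Not part of the diff above, but for quick local testing: the new sample functions can also be called directly from Python rather than through the argparse CLI. A minimal sketch, assuming it runs from video/cloud-client/analyze with the google-cloud-videointelligence package (which provides the videointelligence_v1p3beta1 client used here) installed and GOOGLE_APPLICATION_CREDENTIALS configured; the file path and bucket are the placeholders from the sample docstring:

# Sketch only: invoke the new streaming samples programmatically.
import beta_snippets

# Equivalent to: python beta_snippets.py streaming-labels resources/cat.mp4
beta_snippets.detect_labels_streaming('resources/cat.mp4')

# Equivalent to: python beta_snippets.py streaming-annotation-storage \
#     resources/cat.mp4 gs://mybucket/myfolder
beta_snippets.annotation_to_storage_streaming(
    'resources/cat.mp4', 'gs://mybucket/myfolder')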
