Commit e59e15b

dizcology authored and chenyumic committed
[DO NOT MERGE] remove face detection samples (GoogleCloudPlatform#1435)
* remove face detection samples
* update docstring
* linter
* linter
1 parent 50c3bcd commit e59e15b

File tree: 2 files changed, +5 -141 lines changed

video/cloud-client/analyze/beta_snippets.py

Lines changed: 3 additions & 122 deletions (+3, -122)
@@ -14,16 +14,10 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-"""This application demonstrates face detection, face emotions
-and speech transcription using the Google Cloud API.
+"""This application demonstrates speech transcription using the
+Google Cloud API.

 Usage Examples:
-    python beta_snippets.py boxes \
-    gs://python-docs-samples-tests/video/googlework_short.mp4
-
-    python beta_snippets.py \
-    emotions gs://python-docs-samples-tests/video/googlework_short.mp4
-
     python beta_snippets.py \
     transcription gs://python-docs-samples-tests/video/googlework_short.mp4
 """
@@ -33,108 +27,6 @@
 from google.cloud import videointelligence_v1p1beta1 as videointelligence


-# [START video_face_bounding_boxes]
-def face_bounding_boxes(gcs_uri):
-    """ Detects faces' bounding boxes. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceConfig(
-        include_bounding_boxes=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        gcs_uri, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=900)
-    print('\nFinished processing.')
-
-    # There is only one result because a single video was processed.
-    faces = result.annotation_results[0].face_detection_annotations
-    for i, face in enumerate(faces):
-        print('Face {}'.format(i))
-
-        # Each face_detection_annotation has only one segment.
-        segment = face.segments[0]
-        start_time = (segment.segment.start_time_offset.seconds +
-                      segment.segment.start_time_offset.nanos / 1e9)
-        end_time = (segment.segment.end_time_offset.seconds +
-                    segment.segment.end_time_offset.nanos / 1e9)
-        positions = '{}s to {}s'.format(start_time, end_time)
-        print('\tSegment: {}\n'.format(positions))
-
-        # Each detected face may appear in many frames of the video.
-        # Here we process only the first frame.
-        frame = face.frames[0]
-
-        time_offset = (frame.time_offset.seconds +
-                       frame.time_offset.nanos / 1e9)
-        box = frame.attributes[0].normalized_bounding_box
-
-        print('First frame time offset: {}s\n'.format(time_offset))
-
-        print('First frame normalized bounding box:')
-        print('\tleft : {}'.format(box.left))
-        print('\ttop : {}'.format(box.top))
-        print('\tright : {}'.format(box.right))
-        print('\tbottom: {}'.format(box.bottom))
-        print('\n')
-# [END video_face_bounding_boxes]
-
-
-# [START video_face_emotions]
-def face_emotions(gcs_uri):
-    """ Analyze faces' emotions over frames. """
-    video_client = videointelligence.VideoIntelligenceServiceClient()
-    features = [videointelligence.enums.Feature.FACE_DETECTION]
-
-    config = videointelligence.types.FaceConfig(
-        include_emotions=True)
-    context = videointelligence.types.VideoContext(
-        face_detection_config=config)
-
-    operation = video_client.annotate_video(
-        gcs_uri, features=features, video_context=context)
-    print('\nProcessing video for face annotations:')
-
-    result = operation.result(timeout=600)
-    print('\nFinished processing.')
-
-    # There is only one result because a single video was processed.
-    faces = result.annotation_results[0].face_detection_annotations
-    for i, face in enumerate(faces):
-        for j, frame in enumerate(face.frames):
-            time_offset = (frame.time_offset.seconds +
-                           frame.time_offset.nanos / 1e9)
-            emotions = frame.attributes[0].emotions
-
-            print('Face {}, frame {}, time_offset {}\n'.format(
-                i, j, time_offset))
-
-            # from videointelligence.enums
-            emotion_labels = (
-                'EMOTION_UNSPECIFIED', 'AMUSEMENT', 'ANGER',
-                'CONCENTRATION', 'CONTENTMENT', 'DESIRE',
-                'DISAPPOINTMENT', 'DISGUST', 'ELATION',
-                'EMBARRASSMENT', 'INTEREST', 'PRIDE', 'SADNESS',
-                'SURPRISE')
-
-            for emotion in emotions:
-                emotion_index = emotion.emotion
-                emotion_label = emotion_labels[emotion_index]
-                emotion_score = emotion.score
-
-                print('emotion: {} (confidence score: {})'.format(
-                    emotion_label, emotion_score))
-
-            print('\n')
-
-        print('\n')
-# [END video_face_emotions]
-
-
 # [START video_speech_transcription]
 def speech_transcription(input_uri):
     """Transcribe speech from a video stored on GCS."""
@@ -181,23 +73,12 @@ def speech_transcription(input_uri):
         description=__doc__,
         formatter_class=argparse.RawDescriptionHelpFormatter)
     subparsers = parser.add_subparsers(dest='command')
-    analyze_faces_parser = subparsers.add_parser(
-        'boxes', help=face_bounding_boxes.__doc__)
-    analyze_faces_parser.add_argument('gcs_uri')
-
-    analyze_emotions_parser = subparsers.add_parser(
-        'emotions', help=face_emotions.__doc__)
-    analyze_emotions_parser.add_argument('gcs_uri')

     speech_transcription_parser = subparsers.add_parser(
         'transcription', help=speech_transcription.__doc__)
     speech_transcription_parser.add_argument('gcs_uri')

     args = parser.parse_args()

-    if args.command == 'boxes':
-        face_bounding_boxes(args.gcs_uri)
-    elif args.command == 'emotions':
-        face_emotions(args.gcs_uri)
-    elif args.command == 'transcription':
+    if args.command == 'transcription':
         speech_transcription(args.gcs_uri)
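After this commit, speech transcription is the only command left in beta_snippets.py. The body of speech_transcription() falls outside the diff context above, so the following is only a minimal sketch of what such a snippet typically looks like with the videointelligence_v1p1beta1 client; the SpeechTranscriptionConfig and language_code details are assumptions based on that client library, not lines taken from this file.

# Illustrative sketch only; not the exact code in beta_snippets.py.
from google.cloud import videointelligence_v1p1beta1 as videointelligence


def speech_transcription_sketch(input_uri):
    """Transcribe speech from a video stored on GCS (illustrative only)."""
    video_client = videointelligence.VideoIntelligenceServiceClient()
    features = [videointelligence.enums.Feature.SPEECH_TRANSCRIPTION]

    # Speech transcription needs a language hint supplied via the video context.
    config = videointelligence.types.SpeechTranscriptionConfig(
        language_code='en-US')
    video_context = videointelligence.types.VideoContext(
        speech_transcription_config=config)

    operation = video_client.annotate_video(
        input_uri, features=features, video_context=video_context)
    print('\nProcessing video for speech transcription.')

    result = operation.result(timeout=180)

    # There is only one annotation_result because a single video was processed.
    annotation_results = result.annotation_results[0]
    transcription = annotation_results.speech_transcriptions[0]
    alternative = transcription.alternatives[0]

    print('Transcript: {}'.format(alternative.transcript))
    print('Confidence: {}\n'.format(alternative.confidence))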

video/cloud-client/analyze/beta_snippets_test.py

Lines changed: 2 additions & 19 deletions (+2, -19)
@@ -20,30 +20,13 @@

 import beta_snippets

-
 BUCKET = os.environ['CLOUD_STORAGE_BUCKET']
-FACES_SHORT_FILE_PATH = 'video/googlework_short.mp4'
-
-
-@pytest.mark.slow
-def test_face_bounding_boxes(capsys):
-    beta_snippets.face_bounding_boxes(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'top :' in out
-
-
-@pytest.mark.slow
-def test_face_emotions(capsys):
-    beta_snippets.face_emotions(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
-    out, _ = capsys.readouterr()
-    assert 'CONCENTRATION' in out
+FILE_PATH = 'video/googlework_short.mp4'


 @pytest.mark.slow
 def test_speech_transcription(capsys):
     beta_snippets.speech_transcription(
-        'gs://{}/{}'.format(BUCKET, FACES_SHORT_FILE_PATH))
+        'gs://{}/{}'.format(BUCKET, FILE_PATH))
     out, _ = capsys.readouterr()
     assert 'cultural' in out
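The surviving test shows how the remaining snippet is exercised. A minimal way to run the same call outside pytest, assuming the CLOUD_STORAGE_BUCKET environment variable names a bucket containing the googlework_short.mp4 test video (as in the test environment):

import os

import beta_snippets

# Build the gs:// URI the same way the test does and call the snippet directly.
bucket = os.environ['CLOUD_STORAGE_BUCKET']
file_path = 'video/googlework_short.mp4'
beta_snippets.speech_transcription('gs://{}/{}'.format(bucket, file_path))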

0 commit comments

Comments
0 (0)
Morty Proxy This is a proxified and sanitized view of the page, visit original site.