GCP Vision API, Video Intelligence API, and Translation API

Vision API

Step 1: Enable Vision API

Step 2: Create a service account and download the JSON key file

Step 3: Run the command

export GOOGLE_APPLICATION_CREDENTIALS="path_to_key.json"
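
The client libraries read this variable when a client is created. If you prefer setting it from Python instead of the shell (for example in a notebook), a minimal sketch is to set it via os.environ before instantiating any client; the key path is the same placeholder used above:

import os

# Must be set before any Google Cloud client is created.
os.environ["GOOGLE_APPLICATION_CREDENTIALS"] = "path_to_key.json"

The same credential setup applies to the Video Intelligence and Translation sections below.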

Step 4: Vision API Python script

import io
import os

# Imports the Google Cloud client library
from google.cloud import vision

# Instantiates a client
client = vision.ImageAnnotatorClient()

# The name of the image file to annotate
file_name = os.path.abspath(r'path to image.jpg')

# Loads the image into memory
with io.open(file_name, 'rb') as image_file:
    content = image_file.read()

image = vision.Image(content=content)

# Performs label detection on the image file
response = client.label_detection(image=image)
labels = response.label_annotations

print('Labels:')
for label in labels:
    print(label.description)

#### FACE DETECTION ######

response_face = client.face_detection(image=image)

face_data = []

for face_detection in response_face.face_annotations:
    d = {
        'confidence': face_detection.detection_confidence,
        'joy': face_detection.joy_likelihood,
        'sorrow': face_detection.sorrow_likelihood,
        'surprise': face_detection.surprise_likelihood,
        'anger': face_detection.anger_likelihood
    }
    face_data.append(d)
    print(d)

#### IMAGE PROPERTIES ######

response_image = client.image_properties(image=image)

image_data = []

for c in response_image.image_properties_annotation.dominant_colors.colors[:3]:
    d = {
        'color': c.color,
        'score': c.score,
        'pixel_fraction': c.pixel_fraction
    }
    image_data.append(d)
    print(d)

#### TEXT DETECTION ######

response_text = client.text_detection(image=image)

for r in response_text.text_annotations:
    d = {
        'text': r.description
    }
    print(d)
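
Every annotate call above returns a response whose error field is populated when the request fails (unreadable image bytes, missing permissions, quota issues). A minimal check on the label-detection response, mirroring the pattern recommended in the Cloud Vision samples:

#### ERROR CHECK ######

# response.error is a google.rpc.Status; an empty message means success.
if response.error.message:
    raise Exception(
        "{}\nFor more info, see: https://cloud.google.com/apis/design/errors".format(
            response.error.message
        )
    )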

Video Intelligence API

Step 1: Enable Video Intelligence API

Step 2: Create a service account and download the JSON key file

Step 3: Run the command

export GOOGLE_APPLICATION_CREDENTIALS="path_to_key.json"

Step 4: Video Intelligence API Python script

from google.cloud import videointelligence_v1p3beta1 as videointelligence
import io
path = 'path_to_video.mp4'

client = videointelligence.StreamingVideoIntelligenceServiceClient()

# Set streaming config.
config = videointelligence.StreamingVideoConfig(
    feature=(videointelligence.StreamingFeature.STREAMING_LABEL_DETECTION)
)

# config_request should be the first in the stream of requests.
config_request = videointelligence.StreamingAnnotateVideoRequest(
    video_config=config
)

# Set the chunk size to 5MB (recommended less than 10MB).
chunk_size = 5 * 1024 * 1024

# Load file content.
stream = []
with io.open(path, "rb") as video_file:
    while True:
        data = video_file.read(chunk_size)
        if not data:
            break
        stream.append(data)

def stream_generator():
    yield config_request
    for chunk in stream:
        yield videointelligence.StreamingAnnotateVideoRequest(input_content=chunk)

requests = stream_generator()

# streaming_annotate_video returns a generator.
# The default timeout is about 300 seconds.
# To process longer videos it should be set to
# larger than the length (in seconds) of the stream.
responses = client.streaming_annotate_video(requests, timeout=600)

# Each response corresponds to about 1 second of video.
for response in responses:
    # Check for errors.
    if response.error.message:
        print(response.error.message)
        break

    label_annotations = response.annotation_results.label_annotations

    # label_annotations could be empty
    if not label_annotations:
        continue

    for annotation in label_annotations:
        # Each annotation has one frame, which carries a time offset.
        frame = annotation.frames[0]
        time_offset = (
            frame.time_offset.seconds + frame.time_offset.microseconds / 1e6
        )

        description = annotation.entity.description
        confidence = frame.confidence
        # description is in Unicode
        print(
            "{}s: {} (confidence: {})".format(time_offset, description, confidence)
        )
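
The streaming client above emits labels roughly once per second while the chunks are pushed. If live results are not needed, the library also provides a batch client that annotates a whole file and returns segment-level labels. The following is a minimal sketch of that alternative, assuming google-cloud-videointelligence v2.x and the same local file (the vi_batch alias is only to avoid clashing with the streaming import above):

from google.cloud import videointelligence_v1 as vi_batch

batch_client = vi_batch.VideoIntelligenceServiceClient()

with io.open(path, "rb") as video_file:
    input_content = video_file.read()

# annotate_video starts a long-running operation; result() blocks until it finishes.
operation = batch_client.annotate_video(
    request={
        "features": [vi_batch.Feature.LABEL_DETECTION],
        "input_content": input_content,
    }
)
result = operation.result(timeout=600)

for label in result.annotation_results[0].segment_label_annotations:
    print(label.entity.description, label.segments[0].confidence)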

Translation API

Step 1: Enable Translation API

Step 2: Create a service account and download the JSON key file

Step 3: Run the command

export GOOGLE_APPLICATION_CREDENTIALS="path_to_key.json"

Step 4: Translation API Python script

def translate_text(target, text):
    """Translates text into the target language.

    The target must be an ISO 639-1 language code.
    See https://g.co/cloud/translate/v2/translate-reference#supported_languages
    """
    from google.cloud import translate_v2 as translate

    translate_client = translate.Client()

    if isinstance(text, bytes):
        text = text.decode("utf-8")

    # Text can also be a sequence of strings, in which case this method
    # will return a sequence of results for each text.
    result = translate_client.translate(text, target_language=target)

    print(u"Text: {}".format(result["input"]))
    print(u"Translation: {}".format(result["translatedText"]))
    print(u"Detected source language: {}".format(result["detectedSourceLanguage"]))


translate_text("hi", "Hi There")
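
As the comment inside translate_text notes, translate() also accepts a sequence of strings, in which case it returns one result dict per input. A minimal sketch of that batch usage (the example strings and the Spanish target are arbitrary):

from google.cloud import translate_v2 as translate

translate_client = translate.Client()

# One result dict per input string, returned in the same order.
results = translate_client.translate(["Good morning", "Good night"], target_language="es")
for r in results:
    print(r["input"], "->", r["translatedText"])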