Struct gapi_grpc::google::cloud::videointelligence::v1p3beta1::VideoAnnotationResults
Annotation results for a single video.
Fields
input_uri: String
Video file location in Cloud Storage.
segment: Option<VideoSegment>
Video segment on which the annotation is run.
segment_label_annotations: Vec<LabelAnnotation>
Topical label annotations on video level or user-specified segment level. There is exactly one element for each unique label.
segment_presence_label_annotations: Vec<LabelAnnotation>
Presence label annotations on video level or user-specified segment level. There is exactly one element for each unique label. Compared to the existing topical segment_label_annotations, this field presents more fine-grained, segment-level labels detected in video content and is made available only when the client sets LabelDetectionConfig.model to “builtin/latest” in the request.
shot_label_annotations: Vec<LabelAnnotation>
Topical label annotations on shot level. There is exactly one element for each unique label.
shot_presence_label_annotations: Vec<LabelAnnotation>
Presence label annotations on shot level. There is exactly one element for each unique label. Compared to the existing topical shot_label_annotations, this field presents more fine-grained, shot-level labels detected in video content and is made available only when the client sets LabelDetectionConfig.model to “builtin/latest” in the request.
frame_label_annotations: Vec<LabelAnnotation>
Label annotations on frame level. There is exactly one element for each unique label.
face_detection_annotations: Vec<FaceDetectionAnnotation>
Face detection annotations.
shot_annotations: Vec<VideoSegment>
Shot annotations. Each shot is represented as a video segment.
explicit_annotation: Option<ExplicitContentAnnotation>
Explicit content annotation.
speech_transcriptions: Vec<SpeechTranscription>
Speech transcriptions.
text_annotations: Vec<TextAnnotation>
OCR text detection and tracking. Annotations for each detected text snippet; each annotation has a list of frame information associated with it.
object_annotations: Vec<ObjectTrackingAnnotation>
Annotations for objects detected and tracked in the video.
logo_recognition_annotations: Vec<LogoRecognitionAnnotation>
Annotations for logos detected, tracked, and recognized in the video.
person_detection_annotations: Vec<PersonDetectionAnnotation>
Person detection annotations.
celebrity_recognition_annotations: Option<CelebrityRecognitionAnnotation>
Celebrity recognition annotations.
error: Option<Status>
If set, indicates an error. Note that for a single AnnotateVideoRequest
some videos may succeed and some may fail.
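A minimal sketch of reading these fields from one set of per-video results (field names follow this page; the surrounding response handling and the nested generated types such as Entity and SpeechRecognitionAlternative are assumed to come from the same module):

use gapi_grpc::google::cloud::videointelligence::v1p3beta1::VideoAnnotationResults;

fn summarize(results: &VideoAnnotationResults) {
    // Per-video failures are reported here; other videos in the same
    // AnnotateVideoRequest may still have succeeded.
    if let Some(status) = &results.error {
        eprintln!("annotation of {} failed: {}", results.input_uri, status.message);
        return;
    }

    // Segment-level topical labels: exactly one element per unique label.
    for label in &results.segment_label_annotations {
        if let Some(entity) = &label.entity {
            println!("segment label: {}", entity.description);
        }
    }

    // Each detected shot is a plain VideoSegment.
    println!("{} shots detected", results.shot_annotations.len());

    // Speech transcription results, if transcription was requested.
    for transcription in &results.speech_transcriptions {
        for alternative in &transcription.alternatives {
            println!("transcript: {}", alternative.transcript);
        }
    }
}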
Trait Implementations
impl Clone for VideoAnnotationResults
fn clone(&self) -> VideoAnnotationResults
pub fn clone_from(&mut self, source: &Self)
impl Debug for VideoAnnotationResults
impl Default for VideoAnnotationResults
fn default() -> VideoAnnotationResults
impl Message for VideoAnnotationResults
fn encode_raw<B>(&self, buf: &mut B) where B: BufMut
fn merge_field<B>(&mut self, tag: u32, wire_type: WireType, buf: &mut B, ctx: DecodeContext) -> Result<(), DecodeError> where B: Buf
fn encoded_len(&self) -> usize
fn clear(&mut self)
pub fn encode<B>(&self, buf: &mut B) -> Result<(), EncodeError> where B: BufMut
pub fn encode_length_delimited<B>(&self, buf: &mut B) -> Result<(), EncodeError> where B: BufMut
pub fn decode<B>(buf: B) -> Result<Self, DecodeError> where Self: Default, B: Buf
pub fn decode_length_delimited<B>(buf: B) -> Result<Self, DecodeError> where Self: Default, B: Buf
pub fn merge<B>(&mut self, buf: B) -> Result<(), DecodeError> where B: Buf
pub fn merge_length_delimited<B>(&mut self, buf: B) -> Result<(), DecodeError> where B: Buf
impl PartialEq<VideoAnnotationResults> for VideoAnnotationResults
fn eq(&self, other: &VideoAnnotationResults) -> bool
fn ne(&self, other: &VideoAnnotationResults) -> bool
impl StructuralPartialEq for VideoAnnotationResults
Auto Trait Implementations
impl RefUnwindSafe for VideoAnnotationResults
impl Send for VideoAnnotationResults
impl Sync for VideoAnnotationResults
impl Unpin for VideoAnnotationResults
impl UnwindSafe for VideoAnnotationResults
Blanket Implementations
impl<T> Any for T where T: 'static + ?Sized
impl<T> Borrow<T> for T where T: ?Sized
impl<T> BorrowMut<T> for T where T: ?Sized
pub fn borrow_mut(&mut self) -> &mut T
impl<T> From<T> for T
impl<T> Instrument for T
pub fn instrument(self, span: Span) -> Instrumented<Self>
pub fn in_current_span(self) -> Instrumented<Self>
impl<T, U> Into<U> for T where U: From<T>
impl<T> IntoRequest<T> for T
pub fn into_request(self) -> Request<T>
impl<T> ToOwned for T where T: Clone
type Owned = T
The resulting type after obtaining ownership.
pub fn to_owned(&self) -> T
pub fn clone_into(&self, target: &mut T)
impl<T, U> TryFrom<U> for T where U: Into<T>
type Error = Infallible
The type returned in the event of a conversion error.
pub fn try_from(value: U) -> Result<T, <T as TryFrom<U>>::Error>
impl<T, U> TryInto<U> for T where U: TryFrom<T>
type Error = <U as TryFrom<T>>::Error
The type returned in the event of a conversion error.
pub fn try_into(self) -> Result<U, <U as TryFrom<T>>::Error>
impl<V, T> VZip<V> for T where V: MultiLane<T>
impl<T> WithSubscriber for T
pub fn with_subscriber<S>(self, subscriber: S) -> WithDispatch<Self> where S: Into<Dispatch>