public interface SafetyRatingOrBuilder
extends com.google.protobuf.MessageOrBuilder
| Modifier and Type | Method and Description |
|---|---|
boolean |
getBlocked()
Output only.
|
HarmCategory |
getCategory()
Output only.
|
int |
getCategoryValue()
Output only.
|
SafetyRating.HarmProbability |
getProbability()
Output only.
|
float |
getProbabilityScore()
Output only.
|
int |
getProbabilityValue()
Output only.
|
SafetyRating.HarmSeverity |
getSeverity()
Output only.
|
float |
getSeverityScore()
Output only.
|
int |
getSeverityValue()
Output only.
|
findInitializationErrors, getAllFields, getDefaultInstanceForType, getDescriptorForType, getField, getInitializationErrorString, getOneofFieldDescriptor, getRepeatedField, getRepeatedFieldCount, getUnknownFields, hasField, hasOneof

int getCategoryValue()
Output only. Harm category.
.google.cloud.aiplatform.v1.HarmCategory category = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
HarmCategory getCategory()
Output only. Harm category.
.google.cloud.aiplatform.v1.HarmCategory category = 1 [(.google.api.field_behavior) = OUTPUT_ONLY];
int getProbabilityValue()
Output only. Harm probability levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmProbability probability = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
SafetyRating.HarmProbability getProbability()
Output only. Harm probability levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmProbability probability = 2 [(.google.api.field_behavior) = OUTPUT_ONLY];
float getProbabilityScore()
Output only. Harm probability score.
float probability_score = 5 [(.google.api.field_behavior) = OUTPUT_ONLY];

int getSeverityValue()
Output only. Harm severity levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmSeverity severity = 6 [(.google.api.field_behavior) = OUTPUT_ONLY];
SafetyRating.HarmSeverity getSeverity()
Output only. Harm severity levels in the content.
.google.cloud.aiplatform.v1.SafetyRating.HarmSeverity severity = 6 [(.google.api.field_behavior) = OUTPUT_ONLY];
float getSeverityScore()
Output only. Harm severity score.
float severity_score = 7 [(.google.api.field_behavior) = OUTPUT_ONLY];

boolean getBlocked()
Output only. Indicates whether the content was filtered out because of this rating.
bool blocked = 3 [(.google.api.field_behavior) = OUTPUT_ONLY];

Copyright © 2024 Google LLC. All rights reserved.