
Commit 178a522

[Vertex AI] Add HarmBlockMethod enum and method property (#13876)
1 parent 9cb9895 commit 178a522

3 files changed: 40 additions and 5 deletions

FirebaseVertexAI/CHANGELOG.md

Lines changed: 3 additions & 0 deletions
@@ -63,6 +63,9 @@
   (#13875)
 - [added] Added a new `HarmBlockThreshold` `.off`, which turns off the safety
   filter. (#13863)
+- [added] Added an optional `HarmBlockMethod` parameter `method` in
+  `SafetySetting` that configures whether responses are blocked based on the
+  `probability` and/or `severity` of content being in a `HarmCategory`. (#13876)
 - [added] Added new `FinishReason` values `.blocklist`, `.prohibitedContent`,
   `.spii` and `.malformedFunctionCall` that may be reported. (#13860)
 - [added] Added new `BlockReason` values `.blocklist` and `.prohibitedContent`

FirebaseVertexAI/Sources/Safety.swift

Lines changed: 33 additions & 1 deletion
@@ -173,9 +173,26 @@ public struct SafetySetting {
     let rawValue: String
   }
 
+  /// The method of computing whether the ``SafetySetting/HarmBlockThreshold`` has been exceeded.
+  public struct HarmBlockMethod: EncodableProtoEnum, Sendable {
+    enum Kind: String {
+      case severity = "SEVERITY"
+      case probability = "PROBABILITY"
+    }
+
+    /// Use both probability and severity scores.
+    public static let severity = HarmBlockMethod(kind: .severity)
+
+    /// Use only the probability score.
+    public static let probability = HarmBlockMethod(kind: .probability)
+
+    let rawValue: String
+  }
+
   enum CodingKeys: String, CodingKey {
     case harmCategory = "category"
     case threshold
+    case method
   }
 
   /// The category this safety setting should be applied to.
@@ -184,10 +201,25 @@ public struct SafetySetting {
   /// The threshold describing what content should be blocked.
   public let threshold: HarmBlockThreshold
 
+  /// The method of computing whether the ``threshold`` has been exceeded.
+  public let method: HarmBlockMethod?
+
   /// Initializes a new safety setting with the given category and threshold.
-  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold) {
+  ///
+  /// - Parameters:
+  ///   - harmCategory: The category this safety setting should be applied to.
+  ///   - threshold: The threshold describing what content should be blocked.
+  ///   - method: The method of computing whether the threshold has been exceeded; if not specified,
+  ///     the default method is ``HarmBlockMethod/severity`` for most models. See [harm block
+  ///     methods](https://cloud.google.com/vertex-ai/generative-ai/docs/multimodal/configure-safety-filters#how_to_configure_safety_filters)
+  ///     in the Google Cloud documentation for more details.
+  /// > Note: For models older than `gemini-1.5-flash` and `gemini-1.5-pro`, the default method
+  /// > is ``HarmBlockMethod/probability``.
+  public init(harmCategory: HarmCategory, threshold: HarmBlockThreshold,
+              method: HarmBlockMethod? = nil) {
     self.harmCategory = harmCategory
     self.threshold = threshold
+    self.method = method
   }
 }
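For context, `HarmBlockMethod` follows the same proto-enum pattern as the SDK's other safety types: a struct wrapping a string raw value that is sent to the backend as that string. The sketch below is a standalone approximation of that pattern; the real `EncodableProtoEnum` protocol is internal to the SDK, so the single-value string encoding shown here is an assumption, not the SDK's actual implementation.

```swift
import Foundation

// Standalone sketch of the proto-enum pattern used by `HarmBlockMethod`.
// Assumption: `EncodableProtoEnum` encodes a value as its raw string.
struct HarmBlockMethodSketch: Encodable {
  enum Kind: String {
    case severity = "SEVERITY"
    case probability = "PROBABILITY"
  }

  /// Use both probability and severity scores.
  static let severity = HarmBlockMethodSketch(kind: .severity)

  /// Use only the probability score.
  static let probability = HarmBlockMethodSketch(kind: .probability)

  let rawValue: String

  init(kind: Kind) {
    rawValue = kind.rawValue
  }

  // Encode as a bare string, matching a proto enum's wire format.
  func encode(to encoder: Encoder) throws {
    var container = encoder.singleValueContainer()
    try container.encode(rawValue)
  }
}

// With the `CodingKeys` above ("category", "threshold", "method"), a safety
// setting that specifies a method would carry a "method" field like this:
let data = try JSONEncoder().encode(["method": HarmBlockMethodSketch.severity])
print(String(data: data, encoding: .utf8)!) // {"method":"SEVERITY"}
```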

FirebaseVertexAI/Tests/Integration/IntegrationTests.swift

Lines changed: 4 additions & 4 deletions
@@ -30,8 +30,8 @@ final class IntegrationTests: XCTestCase {
       parts: "You are a friendly and helpful assistant."
     )
     let safetySettings = [
-      SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
-      SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove),
+      SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
+      SafetySetting(harmCategory: .hateSpeech, threshold: .blockLowAndAbove, method: .severity),
       SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockLowAndAbove),
       SafetySetting(harmCategory: .dangerousContent, threshold: .blockLowAndAbove),
       SafetySetting(harmCategory: .civicIntegrity, threshold: .blockLowAndAbove),
@@ -89,11 +89,11 @@ final class IntegrationTests: XCTestCase {
       modelName: "gemini-1.5-pro",
       generationConfig: generationConfig,
       safetySettings: [
-        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove),
+        SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .severity),
         SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove),
         SafetySetting(harmCategory: .sexuallyExplicit, threshold: .blockOnlyHigh),
         SafetySetting(harmCategory: .dangerousContent, threshold: .blockNone),
-        SafetySetting(harmCategory: .civicIntegrity, threshold: .off),
+        SafetySetting(harmCategory: .civicIntegrity, threshold: .off, method: .probability),
       ],
       toolConfig: .init(functionCallingConfig: .auto()),
       systemInstruction: systemInstruction
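As the updated integration tests show, the new parameter is simply passed as an optional `method:` argument when building each `SafetySetting`. A minimal usage sketch follows; the model name and category choices are illustrative, and it assumes `FirebaseApp.configure()` has already been called so that `VertexAI.vertexAI()` can be used.

```swift
import FirebaseVertexAI

// Sketch of configuring a model with the new `method` parameter.
// Assumes FirebaseApp.configure() has already been called.
let model = VertexAI.vertexAI().generativeModel(
  modelName: "gemini-1.5-flash",
  safetySettings: [
    // Block based on the probability score only.
    SafetySetting(harmCategory: .harassment, threshold: .blockLowAndAbove, method: .probability),
    // Block based on both probability and severity scores.
    SafetySetting(harmCategory: .hateSpeech, threshold: .blockMediumAndAbove, method: .severity),
    // Omitting `method` keeps the model's default (severity for newer models).
    SafetySetting(harmCategory: .dangerousContent, threshold: .blockOnlyHigh),
  ]
)
```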
