# -*- coding: utf-8 -*-
# Copyright 2025 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from absl.testing import absltest


class UnitTests(absltest.TestCase):
def test_safety_settings(self):
# [START safety_settings]
from google import genai
from google.genai import types
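
        # The client picks up the API key from the environment (GOOGLE_API_KEY).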
client = genai.Client()
unsafe_prompt = (
"I support Martians Soccer Club and I think Jupiterians Football Club sucks! "
"Write a ironic phrase about them including expletives."
)
# Set safety_settings for a single category using a list of SafetySetting.
response = client.models.generate_content(
model="gemini-2.0-flash",
contents=unsafe_prompt,
config=types.GenerateContentConfig(
safety_settings=[
types.SafetySetting(
category="HARM_CATEGORY_HARASSMENT", threshold="BLOCK_ONLY_HIGH"
)
]
),
)
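        # If the response was blocked by a safety filter, finish_reason is SAFETY.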
print(response.candidates[0].finish_reason)
print(response.candidates[0].safety_ratings)
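        # Note: category and threshold also accept the typed enums, e.g.
        # types.HarmCategory.HARM_CATEGORY_HARASSMENT and
        # types.HarmBlockThreshold.BLOCK_ONLY_HIGH.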
# [END safety_settings]

    def test_safety_settings_multi(self):
# [START safety_settings_multi]
from google import genai
from google.genai import types

        client = genai.Client()
unsafe_prompt = (
"I support Martians Soccer Club and I think Jupiterians Football Club sucks! "
"Write a ironic phrase about them including expletives."
)
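        # Set safety_settings for multiple categories by listing several SafetySetting objects.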
response = client.models.generate_content(
model="gemini-2.0-flash",
contents=unsafe_prompt,
config=types.GenerateContentConfig(
safety_settings=[
types.SafetySetting(
category="HARM_CATEGORY_HATE_SPEECH",
threshold="BLOCK_MEDIUM_AND_ABOVE",
),
types.SafetySetting(
category="HARM_CATEGORY_HARASSMENT", threshold="BLOCK_ONLY_HIGH"
),
]
),
)
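        # response.text may raise when the model returned no content (e.g. the
        # response was blocked), so fall back to printing the safety ratings.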
try:
print(response.text)
except Exception:
print("No information generated by the model.")
print(response.candidates[0].safety_ratings)
# [END safety_settings_multi]


if __name__ == "__main__":
absltest.main()