/*
 * Copyright 2025 Google LLC
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package genai.safety;

// [START googlegenaisdk_safety_with_txt]

import com.google.genai.Client;
import com.google.genai.types.Candidate;
import com.google.genai.types.Content;
import com.google.genai.types.GenerateContentConfig;
import com.google.genai.types.GenerateContentResponse;
import com.google.genai.types.HarmBlockThreshold;
import com.google.genai.types.HarmCategory;
import com.google.genai.types.HttpOptions;
import com.google.genai.types.Part;
import com.google.genai.types.SafetySetting;
import java.util.List;
import java.util.stream.Collectors;

public class SafetyWithTxt {

  public static void main(String[] args) {
    // TODO(developer): Replace these variables before running the sample.
    String modelId = "gemini-2.5-flash";
    generateContent(modelId);
  }

  /**
   * Generates text content with per-category safety thresholds applied, then prints the first
   * candidate's finish reason and its safety ratings.
   *
   * @param modelId the model to call, e.g. "gemini-2.5-flash"
   * @return the raw model response
   * @throws IllegalStateException if the model returned no candidates
   */
  public static GenerateContentResponse generateContent(String modelId) {
    // The client is AutoCloseable; a single instance can serve many requests.
    try (Client client =
        Client.builder()
            .location("global")
            .vertexAI(true)
            .httpOptions(HttpOptions.builder().apiVersion("v1").build())
            .build()) {

      String systemInstruction = "Be as mean as possible.";

      String prompt =
          "Write a list of 5 disrespectful things that I might say"
              + " to the universe after stubbing my toe in the dark.";

      // Build one SafetySetting per harm category, all with the same low threshold.
      List<SafetySetting> safetySettings =
          List.of(
                  HarmCategory.Known.HARM_CATEGORY_DANGEROUS_CONTENT,
                  HarmCategory.Known.HARM_CATEGORY_HARASSMENT,
                  HarmCategory.Known.HARM_CATEGORY_HATE_SPEECH,
                  HarmCategory.Known.HARM_CATEGORY_SEXUALLY_EXPLICIT)
              .stream()
              .map(
                  harmCategory ->
                      SafetySetting.builder()
                          .category(harmCategory)
                          .threshold(HarmBlockThreshold.Known.BLOCK_LOW_AND_ABOVE)
                          .build())
              .collect(Collectors.toList());

      // Attach the system instruction and safety settings to the request.
      GenerateContentConfig config =
          GenerateContentConfig.builder()
              .systemInstruction(Content.fromParts(Part.fromText(systemInstruction)))
              .safetySettings(safetySettings)
              .build();

      GenerateContentResponse response = client.models.generateContent(modelId, prompt, config);

      // Take the first candidate; fail loudly when the model produced none.
      Candidate firstCandidate =
          response
              .candidates()
              .flatMap(candidateList -> candidateList.stream().findFirst())
              .orElseThrow(
                  () -> new IllegalStateException("No response candidate generated by the model."));

      // Finish Reason will be `SAFETY` if it is blocked.
      System.out.println(firstCandidate.finishReason());
      // Example response:
      // Optional[SAFETY]

      // Print each per-category safety rating in full, when ratings are present.
      firstCandidate
          .safetyRatings()
          .ifPresent(
              ratings -> {
                for (var rating : ratings) {
                  System.out.println("\nCategory: " + rating.category());
                  System.out.println("Is Blocked: " + rating.blocked());
                  System.out.println("Probability: " + rating.probability());
                  System.out.println("Probability Score: " + rating.probabilityScore());
                  System.out.println("Severity: " + rating.severity());
                  System.out.println("Severity Score: " + rating.severityScore());
                }
              });
      // Example response:
      // Category: Optional[HARM_CATEGORY_HATE_SPEECH]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[1.9967922E-5]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.05732864]
      //
      // Category: Optional[HARM_CATEGORY_DANGEROUS_CONTENT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[2.9124324E-6]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.04544826]
      //
      // Category: Optional[HARM_CATEGORY_HARASSMENT]
      // Is Blocked: Optional[true]
      // Probability: Optional[MEDIUM]
      // Probability Score: Optional[0.4593908]
      // Severity: Optional[HARM_SEVERITY_MEDIUM]
      // Severity Score: Optional[0.22082388]
      //
      // Category: Optional[HARM_CATEGORY_SEXUALLY_EXPLICIT]
      // Is Blocked: Optional.empty
      // Probability: Optional[NEGLIGIBLE]
      // Probability Score: Optional[6.453211E-8]
      // Severity: Optional[HARM_SEVERITY_NEGLIGIBLE]
      // Severity Score: Optional[0.023201048]
      return response;
    }
  }
}
// [END googlegenaisdk_safety_with_txt]
