package com.zjj.lbw.ai;

import dev.langchain4j.model.moderation.Moderation;
import dev.langchain4j.model.moderation.ModerationModel;
import dev.langchain4j.model.openai.OpenAiModerationModel;
import dev.langchain4j.model.output.Response;

/**
 * Demo of langchain4j's {@link ModerationModel}: checks whether an input string
 * contains content flagged by the OpenAI moderation endpoint.
 *
 * <p>Requires network access; "demo" is a placeholder API key — replace with a
 * real key to run against the live service.
 */
public class ModerationModelTest {
    public static void main(String[] args) {
        // ModerationModel checks the input for sensitive/harmful content.
        ModerationModel moderationModel = OpenAiModerationModel.withApiKey("demo");
        // If the input contains flagged content, the moderation result carries it back.
        Response<Moderation> response = moderationModel.moderate("我要杀了你");
        Moderation moderation = response.content();
        // flaggedText() is null when nothing was flagged — guard so the demo
        // prints a meaningful message instead of the literal "null".
        if (moderation.flagged()) {
            System.out.println(moderation.flaggedText());
        } else {
            System.out.println("No sensitive content detected.");
        }
    }
}
