package com.hsl.housaileibot001.ai.Guardrail;

import dev.langchain4j.data.message.UserMessage;
import dev.langchain4j.guardrail.InputGuardrail;
import dev.langchain4j.guardrail.InputGuardrailResult;

import java.util.Locale;
import java.util.Set;
import java.util.regex.Pattern;

/**
 * @author liul
 * @date 2025/10/18 23:35
 */
public class SafeInputGuardrail implements InputGuardrail {

    /** Words rejected when they appear as a whole token in the (lower-cased) input. */
    private static final Set<String> BANNED_WORDS = Set.of("kill", "sb", "fuck");

    /**
     * Token separator: one or more non-word characters. Compiled once — this
     * guardrail runs on every incoming message, so avoid per-call regex compilation.
     */
    private static final Pattern NON_WORD = Pattern.compile("\\W+");

    /**
     * Checks whether the user's input is safe, i.e. contains none of the banned words.
     * Matching is case-insensitive and whole-token only (substrings inside larger
     * words are not flagged).
     *
     * @param userMessage the incoming user message; its single text segment is inspected
     * @return a failure result naming the first banned word found, otherwise success
     */
    @Override
    public InputGuardrailResult validate(UserMessage userMessage) {
        // Locale.ROOT gives locale-independent case folding (avoids e.g. the
        // Turkish dotless-i problem with the default-locale toLowerCase()).
        String input = userMessage.singleText().toLowerCase(Locale.ROOT);
        // Split into word tokens so banned terms only match standalone words.
        for (String word : NON_WORD.split(input)) {
            if (BANNED_WORDS.contains(word)) {
                return failure("输入包含敏感词：" + word);
            }
        }
        return success();
    }
}
