"""
Guardrails AI 自定义验证器 - ML内容安全检测

基于训练好的BERT模型进行内容安全验证
"""
from typing import Callable, Dict, Optional, Any
from guardrails.validators import (
    Validator,
    ValidationResult,
    FailResult,
    PassResult,
    register_validator,
)
import torch
from transformers import BertTokenizer, BertForSequenceClassification
import os

@register_validator(name="ml-content-safety", data_type="string")
class MLContentSafety(Validator):
    """
    ML-driven content-safety validator.

    Uses a fine-tuned BERT sequence-classification model to detect whether
    model output contains unsafe/violating content. The classifier's label
    convention (per this code) is: label 0 = safe, label 1 = unsafe.

    Args:
        model_path: Path to the trained model/tokenizer directory.
        threshold: Confidence threshold in [0, 1]; text is rejected only
            when the model predicts "unsafe" with confidence >= threshold.
            Default 0.5.
        device: Inference device ('cpu', 'mps', 'cuda'). Auto-detected when
            None (prefers MPS, then CUDA, then CPU).
        on_fail: Guardrails on-fail handling strategy.

    Raises:
        ValueError: If threshold is outside [0, 1] or model_path does not exist.
        RuntimeError: If the model or tokenizer fails to load.
    """

    def __init__(
        self,
        model_path: str = "./models/safety_classifier",
        threshold: float = 0.5,
        device: Optional[str] = None,
        on_fail: Optional[Callable] = None,
        **kwargs
    ):
        super().__init__(
            on_fail=on_fail,
            model_path=model_path,
            threshold=threshold,
            device=device,
            **kwargs
        )

        # Enforce the documented contract early, before the expensive load.
        if not 0.0 <= threshold <= 1.0:
            raise ValueError(f"threshold must be in [0, 1], got {threshold}")

        self.model_path = model_path
        self.threshold = threshold
        self.device = self._resolve_device(device)

        # Load eagerly so configuration errors surface at construction time
        # rather than on the first validation call.
        self._load_model()

    @staticmethod
    def _resolve_device(device: Optional[str]) -> torch.device:
        """Pick the inference device: explicit choice, else MPS > CUDA > CPU."""
        if device is not None:
            return torch.device(device)
        if torch.backends.mps.is_available():
            return torch.device("mps")
        if torch.cuda.is_available():
            return torch.device("cuda")
        return torch.device("cpu")

    def _load_model(self):
        """Load the fine-tuned BERT model and tokenizer from ``model_path``.

        Raises:
            ValueError: If the model path does not exist.
            RuntimeError: If loading fails (original exception is chained).
        """
        if not os.path.exists(self.model_path):
            raise ValueError(f"模型路径不存在: {self.model_path}")

        try:
            self.tokenizer = BertTokenizer.from_pretrained(self.model_path)
            self.model = BertForSequenceClassification.from_pretrained(self.model_path)
            self.model.to(self.device)
            self.model.eval()  # inference only: disables dropout etc.
        except Exception as e:
            # Chain the original exception so the root cause is preserved.
            raise RuntimeError(f"模型加载失败: {str(e)}") from e

    def _classify(self, value: str):
        """Run the classifier on *value*.

        Returns:
            (predicted_class, confidence): the argmax label index and its
            softmax probability as Python scalars.
        """
        # Tokenizer __call__ is the current API (encode_plus is deprecated);
        # it accepts the same arguments and returns the same tensors.
        inputs = self.tokenizer(
            value,
            add_special_tokens=True,
            max_length=128,
            padding='max_length',
            truncation=True,
            return_attention_mask=True,
            return_tensors='pt'
        )

        input_ids = inputs['input_ids'].to(self.device)
        attention_mask = inputs['attention_mask'].to(self.device)

        with torch.no_grad():
            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask
            )

        probabilities = torch.softmax(outputs.logits, dim=1)
        predicted_class = torch.argmax(probabilities, dim=1).item()
        confidence = probabilities[0][predicted_class].item()
        return predicted_class, confidence

    def _validate(self, value: str, metadata: Dict) -> ValidationResult:
        """Validate that *value* contains no unsafe content.

        Args:
            value: Text to validate.
            metadata: Guardrails metadata (not consulted here).

        Returns:
            PassResult when the content is classified as safe (or the text
            is empty/whitespace-only); FailResult when the model predicts
            "unsafe" with confidence >= threshold.
        """
        if not isinstance(value, str):
            return FailResult(
                error_message="输入必须是字符串类型",
                fix_value=None
            )

        # Empty / whitespace-only text carries nothing to moderate.
        if not value.strip():
            return PassResult()

        predicted_class, confidence = self._classify(value)

        # label 0: safe, label 1: unsafe
        if predicted_class == 1 and confidence >= self.threshold:
            # Unsafe content detected.
            error_message = (
                f"检测到违规内容 (置信度: {confidence:.2%})。"
                f"该内容可能包含不当言论、歧视性表达或其他违规信息。"
            )

            # Attention-weight analysis could be added here for
            # explainability; for now we simply return the failure.
            return FailResult(
                error_message=error_message,
                fix_value=None,  # no automatic fix is offered
                metadata={
                    "predicted_class": predicted_class,
                    "confidence": confidence,
                    "is_safe": False
                }
            )
        else:
            # Content is safe.
            return PassResult(
                metadata={
                    "predicted_class": predicted_class,
                    "confidence": confidence,
                    "is_safe": True
                }
            )


# Simplified functional validator

# Cache constructed validators so the expensive BERT model load happens once
# per (model_path, threshold) pair instead of on every validation call.
_VALIDATOR_CACHE: Dict[Any, Any] = {}


@register_validator(name="simple-ml-safety", data_type="string")
def simple_ml_safety(value: str, metadata: Dict) -> ValidationResult:
    """
    Simplified ML safety validator - functional implementation.

    Args:
        value: Text to validate.
        metadata: Metadata; may carry 'model_path' and 'threshold' overrides.

    Returns:
        ValidationResult
    """
    model_path = metadata.get('model_path', "./models/safety_classifier")
    threshold = metadata.get('threshold', 0.5)

    # Reuse a previously built validator for this configuration; building a
    # new MLContentSafety reloads the full model from disk.
    cache_key = (model_path, threshold)
    validator = _VALIDATOR_CACHE.get(cache_key)
    if validator is None:
        validator = MLContentSafety(model_path=model_path, threshold=threshold)
        _VALIDATOR_CACHE[cache_key] = validator
    return validator._validate(value, metadata)
