#!/usr/bin/env python3
"""
QwenGuard 模型评估脚本
功能：
1. 加载训练好的模型
2. 在验证集上评估
3. 计算详细指标
4. 生成评估报告
"""

import time
import argparse
import warnings
from typing import Dict, List, Tuple
from pathlib import Path

import pandas as pd
import numpy as np
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    roc_auc_score,
    confusion_matrix,
    classification_report
)
from tqdm import tqdm
import json

warnings.filterwarnings('ignore')


class QwenGuardEvaluator:
    """Evaluator for a QwenGuard safety-classification model.

    Loads a fine-tuned causal-LM checkpoint, generates a safety verdict for
    each input text, and computes binary-classification metrics where
    0 = safe and 1 = toxic.
    """

    def __init__(self, model_path: str, device: str = "auto"):
        """
        Initialize the evaluator.

        Args:
            model_path: Path to the model checkpoint.
            device: Device selector (auto/cuda/mps/cpu).
        """
        print(f"正在加载模型: {model_path}")
        self.model_path = model_path
        self.device = self._get_device(device)
        print(f"使用设备: {self.device}")

        # Load model and tokenizer
        self._load_model()

        print("✅ 模型加载完成！\n")

    def _get_device(self, device: str) -> str:
        """Resolve the requested device, auto-detecting CUDA > MPS > CPU."""
        if device != "auto":
            return device

        if torch.cuda.is_available():
            return "cuda"
        elif torch.backends.mps.is_available():
            return "mps"
        else:
            return "cpu"

    def _load_model(self) -> None:
        """Load the tokenizer and model, placing the model on the chosen device."""
        # Load tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(
            self.model_path,
            trust_remote_code=True
        )

        # Pick dtype per device: bf16 on CUDA, fp16 on MPS, fp32 on CPU
        model_kwargs = {"trust_remote_code": True}

        if self.device == "cuda":
            model_kwargs["torch_dtype"] = torch.bfloat16
            model_kwargs["device_map"] = "auto"
        elif self.device == "mps":
            model_kwargs["torch_dtype"] = torch.float16
        else:
            model_kwargs["torch_dtype"] = torch.float32

        self.model = AutoModelForCausalLM.from_pretrained(
            self.model_path,
            **model_kwargs
        )

        # device_map="auto" only covers CUDA; move manually for MPS/CPU
        if self.device in ["mps", "cpu"]:
            self.model = self.model.to(self.device)

        self.model.eval()

    def predict_single(self, text: str) -> Tuple[int, float, str]:
        """
        Predict the safety label for a single text.

        Args:
            text: Input text.

        Returns:
            (predicted_label, confidence, raw_output):
            predicted label (0 = safe, 1 = toxic), confidence score,
            and the raw generated text.
        """
        # Build the chat-format message
        messages = [{"role": "user", "content": text}]

        # Apply the model's chat template
        formatted_text = self.tokenizer.apply_chat_template(
            messages,
            tokenize=False,
            add_generation_prompt=True
        )

        # Tokenize
        model_inputs = self.tokenizer(
            [formatted_text],
            return_tensors="pt"
        ).to(self.model.device)

        # Greedy decode; temperature/top_p explicitly None to silence
        # sampling-parameter warnings when do_sample=False
        with torch.no_grad():
            outputs = self.model.generate(
                **model_inputs,
                max_new_tokens=128,
                do_sample=False,
                temperature=None,
                top_p=None,
                return_dict_in_generate=True,
                output_scores=True
            )
            generated_ids = outputs.sequences

        # Decode only the newly generated tokens (strip the prompt)
        raw_output = self.tokenizer.decode(
            generated_ids[0][len(model_inputs.input_ids[0]):],
            skip_special_tokens=True
        )

        # Confidence from per-token generation scores
        confidence = self._calculate_confidence(outputs.scores)

        # Parse the generated text into a binary label
        predicted_label = self._parse_output(raw_output)

        return predicted_label, confidence, raw_output

    def _calculate_confidence(self, scores: tuple) -> float:
        """
        Derive a confidence value from the generated token scores.

        Args:
            scores: Tuple of per-step logits from `generate`.

        Returns:
            Confidence in [0, 1]; 0.5 when no scores are available.
        """
        if scores is None or len(scores) == 0:
            return 0.5

        # Mean of the max softmax probability over all generated tokens
        all_probs = []
        for token_scores in scores:
            probs = torch.softmax(token_scores[0], dim=0)
            max_prob = probs.max().item()
            all_probs.append(max_prob)

        return float(np.mean(all_probs))

    def _parse_output(self, output: str) -> int:
        """
        Parse the generated text into a binary label.

        Args:
            output: Text generated by the model.

        Returns:
            label: 0 = safe, 1 = toxic.
        """
        output_lower = output.lower()

        # Check "unsafe" before "safe" — "unsafe" contains "safe" as a substring
        if "unsafe" in output_lower:
            return 1  # toxic
        elif "controversial" in output_lower:
            return 1  # controversial also counts as toxic
        elif "safe" in output_lower:
            return 0  # safe
        else:
            # Unparseable output defaults to safe
            return 0

    def evaluate_dataset(
        self,
        dataset_path: str,
        max_samples: int = None,
        output_dir: str = None
    ) -> Dict[str, float]:
        """
        Evaluate the model on a CSV dataset.

        Args:
            dataset_path: Path to a CSV with 'text' and 'label' columns.
            max_samples: Maximum number of samples (None = all).
            output_dir: Directory for detailed results (None = don't save).

        Returns:
            Dictionary of evaluation metrics.
        """
        print(f"\n{'='*70}")
        print(f"正在评估数据集: {dataset_path}")
        print(f"{'='*70}")

        # Load dataset
        df = pd.read_csv(dataset_path)

        # Validate schema
        if 'text' not in df.columns or 'label' not in df.columns:
            raise ValueError("数据集必须包含 'text' 和 'label' 列")

        # Drop rows with missing values
        df = df.dropna(subset=['text', 'label'])

        # Normalize labels to int — pandas may have inferred float (e.g. 1.0)
        df['label'] = df['label'].astype(int)

        # Optionally subsample (fixed seed for reproducibility)
        if max_samples is not None and len(df) > max_samples:
            df = df.sample(n=max_samples, random_state=42)

        total_samples = len(df)
        print(f"样本数: {total_samples:,}")

        # Run predictions
        predictions = []
        confidences = []
        raw_outputs = []
        true_labels = df['label'].tolist()

        start_time = time.time()

        for idx, row in tqdm(df.iterrows(), total=total_samples, desc="评估中"):
            text = str(row['text'])
            pred_label, confidence, raw_output = self.predict_single(text)

            predictions.append(pred_label)
            confidences.append(confidence)
            raw_outputs.append(raw_output)

        end_time = time.time()
        total_time = end_time - start_time

        # Compute metrics
        metrics = self._calculate_metrics(
            true_labels,
            predictions,
            confidences,
            total_samples,
            total_time
        )

        # Optionally persist per-sample results
        if output_dir:
            self._save_detailed_results(
                df, predictions, confidences, raw_outputs, metrics, output_dir
            )

        return metrics

    def _calculate_metrics(
        self,
        true_labels: List[int],
        predictions: List[int],
        confidences: List[float],
        total_samples: int,
        total_time: float
    ) -> Dict[str, float]:
        """
        Compute and print evaluation metrics.

        Args:
            true_labels: Ground-truth labels.
            predictions: Predicted labels.
            confidences: Per-sample confidence scores.
            total_samples: Number of samples evaluated.
            total_time: Wall-clock time in seconds.

        Returns:
            Dictionary of evaluation metrics.
        """
        # Core binary metrics
        accuracy = accuracy_score(true_labels, predictions)
        precision = precision_score(true_labels, predictions, zero_division=0)
        recall = recall_score(true_labels, predictions, zero_division=0)
        f1 = f1_score(true_labels, predictions, zero_division=0)

        # Force labels=[0, 1] so the matrix is always 2x2 — without it a
        # single-class evaluation yields a 1x1 matrix and tn/fp/fn/tp would
        # silently all become 0
        cm = confusion_matrix(true_labels, predictions, labels=[0, 1])
        tn, fp, fn, tp = cm.ravel()

        # AUC needs a probability-like score for the positive class:
        # confidence if predicted toxic, else 1 - confidence
        probs = []
        for pred, conf in zip(predictions, confidences):
            if pred == 1:
                probs.append(conf)
            else:
                probs.append(1 - conf)

        try:
            auc = roc_auc_score(true_labels, probs)
        except ValueError:
            # Only one class present — AUC is undefined
            auc = 0.0

        # Mean confidence over all samples
        avg_confidence = np.mean(confidences)

        # Throughput (samples per second)
        inference_speed = total_samples / total_time if total_time > 0 else 0

        # Print summary
        print(f"\n{'='*70}")
        print("📊 评估结果")
        print(f"{'='*70}")
        print(f"样本数:         {total_samples:,}")
        print(f"准确率:         {accuracy:.4f} ({accuracy*100:.2f}%)")
        print(f"精确率:         {precision:.4f} ({precision*100:.2f}%)")
        print(f"召回率:         {recall:.4f} ({recall*100:.2f}%)")
        print(f"F1分数:         {f1:.4f} ({f1*100:.2f}%)")
        print(f"AUC:            {auc:.4f}")
        print(f"平均置信度:     {avg_confidence:.4f}")
        print(f"推理速度:       {inference_speed:.2f} samples/s")
        print(f"总耗时:         {total_time:.2f} 秒")

        print(f"\n混淆矩阵:")
        print(f"{'':>15} {'预测安全':>12} {'预测有毒':>12}")
        print(f"{'实际安全':>15} {tn:>12,} {fp:>12,}")
        print(f"{'实际有毒':>15} {fn:>12,} {tp:>12,}")

        print(f"\n详细分类报告:")
        # labels=[0, 1] keeps target_names aligned even if a class is absent
        print(classification_report(
            true_labels,
            predictions,
            labels=[0, 1],
            target_names=['安全(0)', '有毒(1)'],
            digits=4,
            zero_division=0
        ))

        print(f"{'='*70}\n")

        # Return metrics (ints cast for JSON serialization)
        return {
            "total_samples": total_samples,
            "accuracy": accuracy,
            "precision": precision,
            "recall": recall,
            "f1_score": f1,
            "auc": auc,
            "avg_confidence": avg_confidence,
            "inference_speed": inference_speed,
            "total_time": total_time,
            "confusion_matrix": {
                "tn": int(tn),
                "fp": int(fp),
                "fn": int(fn),
                "tp": int(tp)
            }
        }

    def _save_detailed_results(
        self,
        df: pd.DataFrame,
        predictions: List[int],
        confidences: List[float],
        raw_outputs: List[str],
        metrics: Dict,
        output_dir: str
    ) -> None:
        """Persist per-sample predictions (CSV) and aggregate metrics (JSON)."""
        output_path = Path(output_dir)
        output_path.mkdir(parents=True, exist_ok=True)

        # Per-sample predictions alongside the original rows
        results_df = df.copy()
        results_df['predicted_label'] = predictions
        results_df['confidence'] = confidences
        results_df['raw_output'] = raw_outputs
        results_df['is_correct'] = results_df['label'] == results_df['predicted_label']

        # utf-8-sig so Excel opens the Chinese text correctly
        results_csv_path = output_path / 'evaluation_predictions.csv'
        results_df.to_csv(results_csv_path, index=False, encoding='utf-8-sig')
        print(f"✅ 预测结果已保存: {results_csv_path}")

        # Aggregate metrics
        metrics_path = output_path / 'evaluation_metrics.json'
        with open(metrics_path, 'w', encoding='utf-8') as f:
            json.dump(metrics, f, indent=2, ensure_ascii=False)
        print(f"✅ 评估指标已保存: {metrics_path}")


def main():
    """CLI entry point: parse arguments, build the evaluator, run evaluation."""
    parser = argparse.ArgumentParser(description="QwenGuard 模型评估")
    parser.add_argument("--model_path", type=str,
                        default="../outputs/best_model", help="模型路径")
    parser.add_argument("--data_path", type=str,
                        default="../processed_data/val.csv", help="验证集路径")
    parser.add_argument("--max_samples", type=int,
                        default=None, help="最大评估样本数")
    parser.add_argument("--output_dir", type=str,
                        default="../outputs/evaluation", help="输出目录")
    parser.add_argument("--device", type=str, default="auto",
                        choices=["auto", "cuda", "mps", "cpu"], help="设备类型")
    args = parser.parse_args()

    # Build the evaluator with the requested checkpoint and device
    evaluator = QwenGuardEvaluator(model_path=args.model_path, device=args.device)

    # Run the evaluation pass over the dataset
    metrics = evaluator.evaluate_dataset(
        dataset_path=args.data_path,
        max_samples=args.max_samples,
        output_dir=args.output_dir,
    )

    print("✅ 评估完成！")


if __name__ == "__main__":
    main()
