import os
import argparse
import json
import logging
from datetime import datetime
from typing import Dict, Any

import torch
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score, classification_report
import pandas as pd

from transformers import AutoTokenizer

# Reuse the dataset class and normalization constants from the training script.
try:
    from train_classifier import DiseaseTextImageDataset, IMAGENET_MEAN, IMAGENET_STD
except Exception:
    # Fallback constants; a lightweight copy of the dataset is defined here
    # only when importing from the training script fails.
    IMAGENET_MEAN = (0.485, 0.456, 0.406)
    IMAGENET_STD = (0.229, 0.224, 0.225)
    from torchvision import transforms
    from PIL import Image
    import pandas as pd

    class DiseaseTextImageDataset(torch.utils.data.Dataset):
        """Minimal (image, text, label) dataset mirroring the training version.

        Expects a CSV with columns ``image`` (path relative to ``image_root``),
        ``text`` (free text), and ``label`` (integer class id).
        """

        def __init__(self, csv_path: str, image_root: str, tokenizer, image_size: int = 224, max_len: int = 64):
            super().__init__()
            self.df = pd.read_csv(csv_path)
            # Validate explicitly instead of using `assert`: assertions are
            # stripped when Python runs with -O, which would silently skip
            # this schema check.
            if not {'image', 'text', 'label'}.issubset(set(self.df.columns)):
                raise ValueError("CSV必须包含列: image, text, label")
            self.image_root = image_root
            self.tokenizer = tokenizer
            self.max_len = max_len
            # Same preprocessing pipeline as training: resize, tensor, ImageNet norm.
            self.transform = transforms.Compose([
                transforms.Resize((image_size, image_size)),
                transforms.ToTensor(),
                transforms.Normalize(mean=IMAGENET_MEAN, std=IMAGENET_STD),
            ])

        def __len__(self):
            return len(self.df)

        def __getitem__(self, idx):
            """Return a dict with keys image / input_ids / attention_mask / label."""
            row = self.df.iloc[idx]
            img_path = os.path.join(self.image_root, row['image'])
            text = str(row['text'])
            label = int(row['label'])

            image = Image.open(img_path).convert('RGB')
            image = self.transform(image)

            tokens = self.tokenizer(
                text,
                truncation=True,
                padding='max_length',
                max_length=self.max_len,
                return_tensors='pt'
            )

            # squeeze(0) drops the batch dim added by return_tensors='pt'.
            item = {
                'image': image,
                'input_ids': tokens['input_ids'].squeeze(0),
                'attention_mask': tokens['attention_mask'].squeeze(0),
                'label': torch.tensor(label, dtype=torch.long)
            }
            return item

from moe_classifier import MultiModalMoEClassifier


def setup_logger(log_dir: str) -> logging.Logger:
    """Create an INFO-level logger writing to both the console and test.log.

    The logger name carries a timestamp so repeated runs get distinct logger
    objects; any previously attached handlers are cleared first.
    """
    os.makedirs(log_dir, exist_ok=True)
    stamp = datetime.now().strftime('%Y%m%d_%H%M%S')
    logger = logging.getLogger(f"test_{stamp}")
    logger.setLevel(logging.INFO)
    logger.handlers.clear()
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    handlers = (
        logging.FileHandler(os.path.join(log_dir, 'test.log'), encoding='utf-8'),
        logging.StreamHandler(),
    )
    for handler in handlers:
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger


def load_model_from_ckpt(ckpt_path: str, args: argparse.Namespace, device: torch.device) -> MultiModalMoEClassifier:
    """Build a MultiModalMoEClassifier and load its weights from a checkpoint.

    CLI arguments take precedence; any hyper-parameter not given on the
    command line falls back to the value saved in the checkpoint's ``args``
    dict so the rebuilt architecture matches the one used at training time.

    Args:
        ckpt_path: Path to the checkpoint file saved by the training script.
        args: Parsed CLI namespace (architecture fields may be None to defer
            to the checkpoint's saved values).
        device: Device the model is moved to.

    Returns:
        The model in eval mode on ``device``.

    Raises:
        FileNotFoundError: If ``ckpt_path`` does not exist.
        ValueError: If ``num_classes`` can be resolved from neither source.
    """
    if not os.path.exists(ckpt_path):
        raise FileNotFoundError(f"未找到checkpoint文件: {ckpt_path}")
    # The checkpoint stores a plain dict of training args alongside weights,
    # so a full (non weights-only) torch.load is required here.
    ckpt = torch.load(ckpt_path, map_location=device)
    ckpt_args = ckpt.get('args', {})

    def _resolve(cli_value, fallback):
        # Explicit None check (not `or`) so a legitimate falsy CLI value
        # such as 0 is not silently replaced by the checkpoint value.
        return cli_value if cli_value is not None else fallback

    num_classes = _resolve(args.num_classes, ckpt_args.get('num_classes'))
    if num_classes is None:
        raise ValueError("必须提供 --num_classes，或确保checkpoint中包含训练时的num_classes")

    img_backbone = _resolve(args.img_backbone, ckpt_args.get('img_backbone', 'resnet50'))
    text_model = _resolve(args.text_model, ckpt_args.get('text_model', 'distilbert-base-uncased'))
    feature_dim = _resolve(args.feature_dim, ckpt_args.get('feature_dim', 512))
    num_experts = _resolve(args.num_experts, ckpt_args.get('num_experts', 3))
    top_k = _resolve(args.top_k, ckpt_args.get('top_k', 2))
    # store_true flag defaults to False (never None): keep `or` so an unset
    # CLI flag defers to the value recorded at training time.
    train_text_encoder = args.train_text_encoder or ckpt_args.get('train_text_encoder', False)

    model = MultiModalMoEClassifier(
        num_classes=num_classes,
        img_backbone=img_backbone,
        text_model=text_model,
        feature_dim=feature_dim,
        num_experts=num_experts,
        top_k=top_k,
        train_text_encoder=train_text_encoder,
        # getattr with defaults: older CLI namespaces may lack these fields.
        visual_pretrained=getattr(args, 'visual_pretrained', False),
        use_visual_aux=getattr(args, 'use_visual_aux', False),
        visual_aux_weight=getattr(args, 'visual_aux_weight', 0.4),
        gating_strategy=getattr(args, 'gating_strategy', 'softmax_topk'),
        gating_temperature=getattr(args, 'gating_temperature', 1.0),
        gating_power_alpha=getattr(args, 'gating_power_alpha', 1.0),
        second_prob_threshold=getattr(args, 'second_prob_threshold', 0.0),
    ).to(device)

    # Older checkpoints may store the raw state_dict instead of {'model': ...}.
    state = ckpt.get('model', ckpt)
    model.load_state_dict(state, strict=True)
    model.eval()
    return model


def compute_metrics(y_true, y_pred) -> Dict[str, Any]:
    """Return accuracy plus macro-averaged precision, recall, and F1.

    ``zero_division=0`` keeps sklearn from raising/warning on classes that
    never appear in the predictions.
    """
    macro = {'average': 'macro', 'zero_division': 0}
    return {
        'accuracy': float(accuracy_score(y_true, y_pred)),
        'precision_macro': float(precision_score(y_true, y_pred, **macro)),
        'recall_macro': float(recall_score(y_true, y_pred, **macro)),
        'f1_macro': float(f1_score(y_true, y_pred, **macro)),
    }


def run_test(args: argparse.Namespace):
    """Evaluate a trained MoE classifier checkpoint on a test CSV.

    Loads the tokenizer/dataset/model consistently with the training run,
    predicts over the whole test set, and writes a metrics JSON, a per-class
    classification report, per-sample predictions, and (optionally) a
    gate-usage report under ``args.output_dir``.
    """
    # Device selection, kept consistent with the training script.
    # NOTE(review): setting CUDA_VISIBLE_DEVICES after torch is imported only
    # takes effect if CUDA has not been initialized yet — confirm ordering.
    if args.gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    os.makedirs(args.output_dir, exist_ok=True)
    logger = setup_logger(os.path.join(args.output_dir, 'logs'))

    # Basic existence checks on the input paths.
    if not os.path.exists(args.test_csv):
        raise FileNotFoundError(f"测试CSV不存在: {args.test_csv}")
    if not os.path.isdir(args.image_root):
        raise FileNotFoundError(f"图像根目录不存在: {args.image_root}")

    logger.info("加载分词器与数据集（保持与训练一致）...")
    # For consistency: if --text_model is not given explicitly, read the model
    # name used at training time from the checkpoint's saved args.
    text_model_name = args.text_model
    try:
        if text_model_name is None:
            ckpt_tmp = torch.load(args.checkpoint, map_location='cpu')
            text_model_name = ckpt_tmp.get('args', {}).get('text_model', 'distilbert-base-uncased')
    except Exception:
        # If reading the checkpoint fails, fall back to the default model name.
        text_model_name = text_model_name or 'distilbert-base-uncased'
    tokenizer = AutoTokenizer.from_pretrained(text_model_name)
    test_ds = DiseaseTextImageDataset(
        args.test_csv, args.image_root, tokenizer,
        image_size=args.image_size, max_len=args.max_len
    )
    test_loader = DataLoader(
        test_ds, batch_size=args.batch_size, shuffle=False,
        num_workers=args.num_workers, pin_memory=True
    )

    logger.info("构建并加载模型参数...")
    model = load_model_from_ckpt(args.checkpoint, args, device)

    all_preds, all_labels = [], []
    # Accumulators for gating statistics (filled only with --report_gate_usage).
    sum_gate_probs = None
    top1_counts = None
    topk_counts = None
    total_samples = 0
    sample_rows = []  # NOTE(review): never appended to below — appears unused.
    logger.info("开始批量测试...")
    with torch.no_grad():
        for batch in test_loader:
            images = batch['image'].to(device)
            input_ids = batch['input_ids'].to(device)
            attention_mask = batch['attention_mask'].to(device)
            labels = batch['label'].to(device)
            # Model is assumed to return (logits, gate_probs, aux); gate_probs
            # presumably has shape (batch, num_experts) — TODO confirm against
            # MultiModalMoEClassifier.forward.
            logits, gate_probs, _ = model(images, input_ids, attention_mask)
            preds = logits.argmax(dim=-1).detach().cpu().numpy()
            all_preds.extend(list(preds))
            all_labels.extend(list(labels.detach().cpu().numpy()))
            # Record gate usage for this batch.
            if args.report_gate_usage:
                bs = images.size(0)
                total_samples += bs
                if sum_gate_probs is None:
                    # Lazily size the accumulators from the first batch's
                    # expert dimension.
                    sum_gate_probs = gate_probs.sum(dim=0)
                    top1_counts = torch.zeros(gate_probs.size(-1), device=gate_probs.device)
                    topk_counts = torch.zeros(gate_probs.size(-1), device=gate_probs.device)
                else:
                    sum_gate_probs += gate_probs.sum(dim=0)
                top1 = gate_probs.argmax(dim=-1)
                for i in range(gate_probs.size(-1)):
                    top1_counts[i] += (top1 == i).sum()
                k = getattr(model, 'top_k', 1)
                if k > 1:
                    topk_idx = torch.topk(gate_probs, k=k, dim=-1).indices
                    for i in range(gate_probs.size(-1)):
                        topk_counts[i] += (topk_idx == i).any(dim=-1).sum()

    metrics = compute_metrics(all_labels, all_preds)
    logger.info(f"测试完成 | Acc: {metrics['accuracy']:.4f} | Recall(macro): {metrics['recall_macro']:.4f} | F1(macro): {metrics['f1_macro']:.4f}")

    # Persist metrics and predictions.
    ts = datetime.now().strftime('%Y%m%d_%H%M%S')
    metrics_path = os.path.join(args.output_dir, f"test_metrics_{ts}.json")
    with open(metrics_path, 'w', encoding='utf-8') as f:
        json.dump(metrics, f, indent=2, ensure_ascii=False)

    # Generate the full per-class classification report.
    report = classification_report(all_labels, all_preds, digits=4, output_dict=True, zero_division=0)
    report_path = os.path.join(args.output_dir, f"test_classification_report_{ts}.json")
    with open(report_path, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)

    # Save per-sample predictions (image/text/label re-read from the original
    # CSV; relies on DataLoader(shuffle=False) keeping row order aligned with
    # all_preds).
    df = pd.read_csv(args.test_csv)
    df_out = df.copy()
    df_out['pred'] = all_preds
    csv_path = os.path.join(args.output_dir, f"test_predictions_{ts}.csv")
    df_out.to_csv(csv_path, index=False)

    logger.info(f"指标JSON保存至: {metrics_path}")
    logger.info(f"分类报告保存至: {report_path}")
    logger.info(f"预测结果CSV保存至: {csv_path}")

    # Emit the gate-usage statistics report if requested.
    if args.report_gate_usage and sum_gate_probs is not None and total_samples > 0:
        mean_probs = (sum_gate_probs / total_samples).detach().cpu().numpy().tolist()
        top1_rates = (top1_counts / total_samples).detach().cpu().numpy().tolist()
        if topk_counts is not None:
            topk_rates = (topk_counts / total_samples).detach().cpu().numpy().tolist()
        else:
            topk_rates = None
        gate_report = {
            'mean_gate_probs': mean_probs,
            'top1_selected_rate': top1_rates,
            'topk_selected_rate': topk_rates,
            'num_experts': int(top1_counts.numel()),
            'top_k': int(getattr(model, 'top_k', 1)),
            'gating_strategy': getattr(model, 'gating_strategy', 'softmax_topk'),
            'second_prob_threshold': float(getattr(model, 'second_prob_threshold', 0.0)),
        }
        # NOTE(review): a fresh timestamp here may differ from the one used
        # for the metrics files above.
        ts = datetime.now().strftime('%Y%m%d_%H%M%S')
        gate_path = os.path.join(args.output_dir, 'logs', f"gate_usage_report_{ts}.json")
        os.makedirs(os.path.dirname(gate_path), exist_ok=True)
        with open(gate_path, 'w', encoding='utf-8') as f:
            json.dump(gate_report, f, indent=2, ensure_ascii=False)
        logger.info(f"门控使用率报告保存至: {gate_path}")


def build_parser() -> argparse.ArgumentParser:
    """Construct the CLI argument parser for the MoE classifier test script."""
    parser = argparse.ArgumentParser(description='MoE分类器测试脚本')
    # (flag, kwargs) pairs, declared in the original order so the generated
    # --help output is unchanged.
    option_specs = [
        ('--test_csv', dict(type=str, default='data/test.csv', help='测试数据CSV路径')),
        ('--image_root', dict(type=str, default='data/images', help='图像根目录')),
        ('--checkpoint', dict(type=str, required=True, help='训练好的checkpoint路径')),
        # Architecture options kept consistent with training (overridable).
        ('--num_classes', dict(type=int, default=None, help='类别数（可从checkpoint中读取）')),
        ('--img_backbone', dict(type=str, default=None, help='图像骨干网络')),
        ('--text_model', dict(type=str, default=None, help='文本模型名称')),
        ('--feature_dim', dict(type=int, default=None, help='特征维度')),
        ('--num_experts', dict(type=int, default=None, help='专家网络数量')),
        ('--top_k', dict(type=int, default=None, help='Top-K专家选择')),
        ('--train_text_encoder', dict(action='store_true', help='是否训练文本编码器（一般测试不启用）')),
        ('--batch_size', dict(type=int, default=64, help='批次大小')),
        ('--image_size', dict(type=int, default=224, help='图像尺寸')),
        ('--max_len', dict(type=int, default=64, help='文本最大长度')),
        ('--num_workers', dict(type=int, default=4, help='DataLoader工作进程数')),
        ('--gpu_id', dict(type=int, default=None, help='指定GPU ID')),
        ('--output_dir', dict(type=str, default='checkpoints/test_outputs', help='输出目录')),
        ('--report_gate_usage', dict(action='store_true', help='在测试阶段记录门控概率与Top-K选择率')),
        # Gating fusion / visual-aux / pretraining options (for ablations).
        ('--visual_pretrained', dict(action='store_true', help='使用预训练视觉骨干')),
        ('--use_visual_aux', dict(action='store_true', help='启用视觉辅助分类头')),
        ('--visual_aux_weight', dict(type=float, default=0.4, help='视觉辅助损失权重')),
        ('--gating_strategy', dict(type=str, default='softmax_topk', choices=['softmax_topk', 'prob_norm', 'softmax_temp', 'power'], help='门控融合权重策略')),
        ('--gating_temperature', dict(type=float, default=1.0, help='Softmax温度')),
        ('--gating_power_alpha', dict(type=float, default=1.0, help='幂次加权alpha')),
        ('--second_prob_threshold', dict(type=float, default=0.0, help='Top-2次优阈值')),
    ]
    for flag, kwargs in option_specs:
        parser.add_argument(flag, **kwargs)
    return parser


if __name__ == '__main__':
    # Parse CLI options and run the evaluation loop.
    args = build_parser().parse_args()
    try:
        run_test(args)
    except Exception as e:
        # Surface the failure on stdout, then re-raise for the full traceback.
        print(f"❌ 测试失败: {e}")
        raise