import os
import argparse
import json
import logging
from datetime import datetime
from typing import Dict, Any, List

import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Subset
from torch.optim.lr_scheduler import CosineAnnealingLR
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import accuracy_score, f1_score, precision_score, recall_score

from transformers import AutoTokenizer

# Reuse the implementations from the training script to keep behavior consistent
from train_classifier import (
    DiseaseTextImageDataset,
    TrainingLogger,
    TrainingVisualizer,
    train_one_epoch,
)
from moe_classifier import MultiModalMoEClassifier


def setup_logger(log_dir: str, name: str) -> logging.Logger:
    """Create (or reset) an INFO-level logger writing to both a file and the console.

    The log file is ``<log_dir>/<name>.log`` (UTF-8). Existing handlers on the
    named logger are cleared first, so repeated calls do not duplicate output.
    """
    os.makedirs(log_dir, exist_ok=True)
    log = logging.getLogger(name)
    log.setLevel(logging.INFO)
    # Drop any handlers left over from a previous call with the same name.
    log.handlers.clear()
    formatter = logging.Formatter(
        '%(asctime)s - %(levelname)s - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
    )
    # File handler first, then console handler — same order as before.
    for handler in (
        logging.FileHandler(os.path.join(log_dir, f'{name}.log'), encoding='utf-8'),
        logging.StreamHandler(),
    ):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        log.addHandler(handler)
    return log


@torch.no_grad()
def evaluate_with_loss(model, dataloader, device):
    """Run one full validation pass.

    Returns a 5-tuple: (avg_loss, accuracy, macro-F1, macro-precision,
    macro-recall), where avg_loss is the sample-weighted mean cross-entropy
    over the whole dataset.
    """
    criterion = nn.CrossEntropyLoss()
    model.eval()
    loss_sum = 0.0
    pred_list, label_list = [], []
    for batch in dataloader:
        imgs = batch['image'].to(device)
        token_ids = batch['input_ids'].to(device)
        mask = batch['attention_mask'].to(device)
        targets = batch['label'].to(device)
        # Model returns (logits, ...); the extra outputs are unused here.
        logits, _, _ = model(imgs, token_ids, mask)
        batch_loss = criterion(logits, targets)
        # Weight by batch size so the final mean is per-sample, not per-batch.
        loss_sum += batch_loss.item() * imgs.size(0)
        pred_list.extend(logits.argmax(dim=-1).detach().cpu().numpy().tolist())
        label_list.extend(targets.detach().cpu().numpy().tolist())
    mean_loss = loss_sum / len(dataloader.dataset)
    return (
        mean_loss,
        accuracy_score(label_list, pred_list),
        f1_score(label_list, pred_list, average='macro'),
        precision_score(label_list, pred_list, average='macro', zero_division=0),
        recall_score(label_list, pred_list, average='macro', zero_division=0),
    )


def run_kfold_eval(args: argparse.Namespace):
    """Run stratified K-fold cross-validation for the multi-modal MoE classifier.

    For each fold: build fresh data loaders, model, optimizer and scheduler,
    train with early stopping on validation macro-F1, save the best checkpoint
    and per-fold metrics JSON, then write an aggregate mean/std summary across
    all folds to ``<output_dir>/kfold_summary.json``.
    """
    # Device setup, kept consistent with the training script.
    # NOTE(review): both branches resolve the device identically; the only
    # effect of --gpu_id is restricting CUDA_VISIBLE_DEVICES beforehand.
    if args.gpu_id is not None:
        os.environ['CUDA_VISIBLE_DEVICES'] = str(args.gpu_id)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    os.makedirs(args.output_dir, exist_ok=True)
    base_log_dir = os.path.join(args.output_dir, 'logs')
    os.makedirs(base_log_dir, exist_ok=True)
    main_logger = setup_logger(base_log_dir, 'kfold_eval')

    # Load dataset and tokenizer; fail fast with clear errors on missing paths.
    if not os.path.exists(args.data_csv):
        raise FileNotFoundError(f"数据CSV不存在: {args.data_csv}")
    if not os.path.isdir(args.image_root):
        raise FileNotFoundError(f"图像根目录不存在: {args.image_root}")

    tokenizer = AutoTokenizer.from_pretrained(args.text_model)
    full_ds = DiseaseTextImageDataset(
        args.data_csv, args.image_root, tokenizer,
        image_size=args.image_size, max_len=args.max_len
    )

    # assumes the dataset exposes its dataframe as `df` with an integer-like
    # 'label' column — consistent with its use in train_classifier
    labels = full_ds.df['label'].astype(int).tolist()
    skf = StratifiedKFold(n_splits=args.k_folds, shuffle=True, random_state=args.seed)

    fold_metrics: List[Dict[str, float]] = []

    # StratifiedKFold only needs labels for splitting, so a zero placeholder
    # stands in for the feature matrix.
    for fold_idx, (train_idx, val_idx) in enumerate(skf.split(np.zeros(len(labels)), labels), start=1):
        fold_name = f"fold_{fold_idx}"
        fold_dir = os.path.join(args.output_dir, fold_name)
        os.makedirs(fold_dir, exist_ok=True)
        logger = setup_logger(fold_dir, fold_name)

        # Per-fold train/val split and loaders.
        train_loader = DataLoader(
            Subset(full_ds, train_idx), batch_size=args.batch_size, shuffle=True,
            num_workers=args.num_workers, pin_memory=True
        )
        val_loader = DataLoader(
            Subset(full_ds, val_idx), batch_size=args.batch_size, shuffle=False,
            num_workers=args.num_workers, pin_memory=True
        )

        # Fresh model and optimizer per fold.
        # getattr guards keep this working even when the ablation flags were
        # never registered on the argparse namespace.
        model = MultiModalMoEClassifier(
            num_classes=args.num_classes,
            img_backbone=args.img_backbone,
            text_model=args.text_model,
            feature_dim=args.feature_dim,
            num_experts=args.num_experts,
            top_k=args.top_k,
            train_text_encoder=args.train_text_encoder,
            visual_pretrained=getattr(args, 'visual_pretrained', False),
            use_visual_aux=getattr(args, 'use_visual_aux', False),
            visual_aux_weight=getattr(args, 'visual_aux_weight', 0.4),
            gating_strategy=getattr(args, 'gating_strategy', 'softmax_topk'),
            gating_temperature=getattr(args, 'gating_temperature', 1.0),
            gating_power_alpha=getattr(args, 'gating_power_alpha', 1.0),
            second_prob_threshold=getattr(args, 'second_prob_threshold', 0.0),
        ).to(device)

        # Only trainable parameters are handed to the optimizer (the text
        # encoder may be frozen depending on --train_text_encoder).
        optimizer = optim.AdamW(filter(lambda p: p.requires_grad, model.parameters()),
                                lr=args.lr, weight_decay=args.weight_decay)
        scheduler = CosineAnnealingLR(optimizer, T_max=args.epochs, eta_min=args.lr * 0.01)

        # Training logger and visualizer for this fold.
        experiment_name = f"CV_{fold_name}_{datetime.now().strftime('%m%d_%H%M')}"
        logger_obj = TrainingLogger(fold_dir, experiment_name)
        visualizer = TrainingVisualizer(fold_dir, experiment_name)

        best_metric = -float('inf')
        best_epoch = 0
        patience_counter = 0

        for epoch in range(1, args.epochs + 1):
            # One training epoch.
            train_loss, train_acc, train_f1 = train_one_epoch(
                model, train_loader, optimizer, device, epoch, lb_coeff=args.lb_coeff
            )
            # Validation pass.
            val_loss, val_acc, val_f1, val_prec, val_rec = evaluate_with_loss(model, val_loader, device)

            # Read LR before stepping so the logged value is the one actually
            # used this epoch.
            current_lr = optimizer.param_groups[0]['lr']
            scheduler.step()

            # Record per-epoch metrics.
            # NOTE(review): epoch_time is not measured here, hence the 0.0.
            logger_obj.log_epoch(
                epoch=epoch,
                train_loss=train_loss,
                train_acc=train_acc,
                train_f1=train_f1,
                val_acc=val_acc,
                val_f1=val_f1,
                lr=current_lr,
                epoch_time=0.0,
                val_loss=val_loss,
            )

            logger.info(f"Epoch {epoch:03d} | TrainLoss {train_loss:.4f} | ValLoss {val_loss:.4f} | ValAcc {val_acc:.4f} | ValF1 {val_f1:.4f} | LR {current_lr:.2e}")

            # Early stopping on validation macro-F1; 1e-6 tolerance avoids
            # resetting patience on float noise.
            if val_f1 > best_metric + 1e-6:
                best_metric = val_f1
                best_epoch = epoch
                patience_counter = 0
                # Checkpoint the best model so far.
                torch.save({'model': model.state_dict()}, os.path.join(fold_dir, f"best.pt"))
            else:
                patience_counter += 1
                if patience_counter >= args.patience:
                    logger.info(f"早停触发：在第 {epoch} 轮停止（最佳在第 {best_epoch} 轮，ValF1={best_metric:.4f}）")
                    break

        # Plot this fold's training curves.
        metrics_dict = logger_obj.get_metrics()
        if metrics_dict:
            visualizer.plot_training_curves(metrics_dict, save_plots=True)
            visualizer.plot_loss_comparison(metrics_dict, save_plots=True)
            visualizer.plot_accuracy_comparison(metrics_dict, save_plots=True)

        # Summarize the fold using the metrics at the best-F1 epoch
        # (recomputed from the logged history rather than the loop variables).
        val_f1_list = metrics_dict.get('val_f1', [])
        val_acc_list = metrics_dict.get('val_acc', [])
        val_loss_list = metrics_dict.get('val_loss', [])
        best_f1 = max(val_f1_list) if val_f1_list else 0.0
        best_idx = val_f1_list.index(best_f1) if val_f1_list else 0
        best_acc = val_acc_list[best_idx] if val_acc_list else 0.0
        best_loss = val_loss_list[best_idx] if val_loss_list else None

        fold_summary = {
            'fold': fold_idx,
            'best_epoch': best_epoch,
            'best_val_f1': float(best_f1),
            'best_val_acc': float(best_acc),
            'best_val_loss': float(best_loss) if best_loss is not None else None,
        }
        with open(os.path.join(fold_dir, 'fold_metrics.json'), 'w', encoding='utf-8') as f:
            json.dump(fold_summary, f, indent=2, ensure_ascii=False)
        fold_metrics.append(fold_summary)

    # Aggregate the K-fold results (mean/std of best-epoch F1 and accuracy).
    f1s = [m['best_val_f1'] for m in fold_metrics]
    accs = [m['best_val_acc'] for m in fold_metrics]
    summary = {
        'k_folds': args.k_folds,
        'f1_macro_mean': float(np.mean(f1s)),
        'f1_macro_std': float(np.std(f1s)),
        'acc_mean': float(np.mean(accs)),
        'acc_std': float(np.std(accs)),
        'fold_details': fold_metrics,
    }
    with open(os.path.join(args.output_dir, 'kfold_summary.json'), 'w', encoding='utf-8') as f:
        json.dump(summary, f, indent=2, ensure_ascii=False)
    main_logger.info(f"K-fold完成 | F1(m)={summary['f1_macro_mean']:.4f}±{summary['f1_macro_std']:.4f} | Acc={summary['acc_mean']:.4f}±{summary['acc_std']:.4f}")

def build_parser() -> argparse.ArgumentParser:
    """Build the CLI parser for the K-fold cross-validation script.

    Mirrors the training script's hyper-parameters so folds are trained with
    a consistent configuration. Returns the configured ArgumentParser.
    """
    parser = argparse.ArgumentParser(description='MoE分类器交叉验证脚本')
    # Data
    parser.add_argument('--data_csv', type=str, default='data/train.csv', help='用于K-fold的CSV（含image,text,label）')
    parser.add_argument('--image_root', type=str, default='data/images', help='图像根目录')

    # Model / training hyper-parameters (kept in sync with the training script)
    parser.add_argument('--num_classes', type=int, required=True, help='分类类别数量')
    parser.add_argument('--img_backbone', type=str, default='resnet50', help='图像骨干网络')
    parser.add_argument('--text_model', type=str, default='distilbert-base-uncased', help='文本模型')
    parser.add_argument('--feature_dim', type=int, default=512, help='特征维度')
    parser.add_argument('--num_experts', type=int, default=3, help='专家网络数量')
    parser.add_argument('--top_k', type=int, default=2, help='Top-K专家选择')
    parser.add_argument('--train_text_encoder', action='store_true', help='是否训练文本编码器')
    parser.add_argument('--batch_size', type=int, default=32, help='批次大小')
    parser.add_argument('--epochs', type=int, default=20, help='训练轮数')
    parser.add_argument('--lr', type=float, default=3e-4, help='学习率')
    parser.add_argument('--weight_decay', type=float, default=1e-4, help='权重衰减')
    parser.add_argument('--image_size', type=int, default=224, help='图像尺寸')
    parser.add_argument('--max_len', type=int, default=64, help='文本最大长度')
    parser.add_argument('--lb_coeff', type=float, default=0.01, help='负载均衡系数')
    parser.add_argument('--gpu_id', type=int, default=None, help='指定GPU ID')
    parser.add_argument('--num_workers', type=int, default=4, help='数据加载器工作进程数')

    # Augmentation / model-strategy options for ablation studies.
    # BUGFIX: these were previously registered AFTER parse_args() in __main__,
    # so they could never be set from the command line (only the getattr
    # defaults inside run_kfold_eval applied). Registering them here makes
    # the flags effective; the defaults match the previous getattr fallbacks,
    # so existing invocations behave identically.
    parser.add_argument('--visual_pretrained', action='store_true', help='使用预训练视觉骨干')
    parser.add_argument('--use_visual_aux', action='store_true', help='启用视觉辅助分类头')
    parser.add_argument('--visual_aux_weight', type=float, default=0.4, help='视觉辅助损失权重')
    parser.add_argument('--gating_strategy', type=str, default='softmax_topk', choices=['softmax_topk', 'prob_norm', 'softmax_temp', 'power'], help='门控融合权重策略')
    parser.add_argument('--gating_temperature', type=float, default=1.0, help='Softmax温度')
    parser.add_argument('--gating_power_alpha', type=float, default=1.0, help='幂次加权alpha')
    parser.add_argument('--second_prob_threshold', type=float, default=0.0, help='Top-2次优阈值，低于则退化Top-1')

    # K-fold and early stopping
    parser.add_argument('--k_folds', type=int, default=5, help='K折数量')
    parser.add_argument('--patience', type=int, default=5, help='早停耐心')
    parser.add_argument('--seed', type=int, default=42, help='随机种子')

    # Output
    parser.add_argument('--output_dir', type=str, default='checkpoints/kfold_eval', help='输出目录')
    return parser


if __name__ == '__main__':
    parser = build_parser()
    args = parser.parse_args()
    try:
        run_kfold_eval(args)
    except Exception as e:
        print(f"❌ 验证失败: {e}")
        raise