#!/usr/bin/env python3
"""
快速训练脚本 - 用于测试和调试
减少参数量，快速验证
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from sklearn.metrics import classification_report
from pathlib import Path
import time
from datetime import datetime

# 获取uc_model目录路径
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform, split_dataset_by_files
from models.fusion_model import EndoMultimodalModel

class QuickConfig:
    """Quick-training configuration (reduced sizes for test/debug runs).

    All settings live as *class* attributes; an instance therefore has an
    empty ``__dict__`` — read values via attribute lookup, not ``vars()``.
    """
    # Data
    data_dir = r"D:\肠内镜数据库\UCEIS1-8"  # root folder scanned recursively for .bmp images
    batch_size = 16  # small batch for fast iteration
    num_workers = 2  # DataLoader worker processes

    # Model - use a smaller feature dimension
    image_pretrained = False  # skip pretrained image weights to cut init time
    text_pretrained = 'bert-base-uncased'  # HF model name for the text branch
    feature_dim = 128  # reduced fused-feature dimension
    num_classes = 8  # presumably UCEIS grades (data dir name suggests 1-8) — TODO confirm

    # Training - only a few epochs
    epochs = 3  # just 3 epochs as a smoke test
    lr = 1e-4
    weight_decay = 1e-5

    # Misc
    seed = 42  # RNG seed (applied in main())
    save_dir = UC_MODEL_DIR / "outputs" / "quick_test"  # checkpoint output directory
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

def main():
    """Run a quick smoke-test training session.

    Scans the configured directory for .bmp images, makes a seeded 80/20
    train/val split, builds the multimodal model, and trains for a few
    epochs with a per-epoch batch cap so the whole pipeline can be
    validated quickly. Checkpoints go to ``QuickConfig.save_dir``.
    """
    print("="*60)
    print("快速训练开始 - 测试版本")
    print("="*60)

    config = QuickConfig()

    # Create the checkpoint directory.
    config.save_dir.mkdir(parents=True, exist_ok=True)

    # Seed both RNGs: the original seeded only torch, which left the
    # random.shuffle() train/val split non-reproducible.
    import random
    random.seed(config.seed)
    torch.manual_seed(config.seed)

    print(f"设备: {config.device}")
    print(f"数据目录: {config.data_dir}")

    # Abort early if the data directory is missing.
    data_path = Path(config.data_dir)
    if not data_path.exists():
        print(f"错误: 数据目录不存在 - {config.data_dir}")
        return

    # Collect all images recursively.
    all_files = list(data_path.rglob("*.bmp"))
    print(f"总图像数: {len(all_files)}")

    print("\n扫描数据文件...")
    print(f"找到图像文件: {len(all_files)}")

    # Simple 80/20 random split at the file level.
    # TODO: switch to split_dataset_by_files once that helper exists.
    random.shuffle(all_files)
    train_size = int(0.8 * len(all_files))
    train_files = all_files[:train_size]
    val_files = all_files[train_size:]

    print(f"训练文件: {len(train_files)}")
    print(f"验证文件: {len(val_files)}")

    # Datasets.
    print("\n创建数据集...")
    train_dataset = UCEISDataset(
        file_list=train_files,
        transform=train_transform,
        mode='train'
    )
    val_dataset = UCEISDataset(
        file_list=val_files,
        transform=val_transform,
        mode='val'
    )

    print(f"训练集: {len(train_dataset)}")
    print(f"验证集: {len(val_dataset)}")

    # Loaders.
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        pin_memory=True
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True
    )

    # Model.
    print("\n创建模型...")
    model = EndoMultimodalModel(
        image_pretrained=config.image_pretrained,
        text_pretrained=config.text_pretrained,
        feature_dim=config.feature_dim,
        num_classes=config.num_classes
    ).to(config.device)

    param_count = sum(p.numel() for p in model.parameters())
    trainable_param_count = sum(p.numel() for p in model.parameters() if p.requires_grad)

    print(f"模型参数量: {param_count:,}")
    print(f"可训练参数: {trainable_param_count:,}")

    # Optimizer and loss.
    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.lr,
        weight_decay=config.weight_decay
    )
    criterion = nn.CrossEntropyLoss()

    # QuickConfig keeps settings as *class* attributes, so an instance's
    # __dict__ is empty; the original saved `config.__dict__`, writing an
    # empty dict into every checkpoint. Snapshot the class attrs instead.
    config_dict = {
        k: v for k, v in vars(QuickConfig).items()
        if not k.startswith('__') and not callable(v)
    }

    # Lazily-created fallback tokenizer. The original reloaded it with
    # from_pretrained() on *every* training batch — a severe slowdown.
    fallback_tokenizer = None

    def _unpack_batch(batch):
        """Return (images, texts, labels) for tuple- or dict-style batches."""
        if isinstance(batch, (list, tuple)) and len(batch) == 3:
            return batch
        return batch['images'], batch['texts'], batch['labels']

    def _texts_to_device(texts):
        """Move tokenized text tensors to the device.

        NOTE(review): when the loader yields raw (non-dict) text, this
        fallback ignores the actual content and encodes the constant
        "Endoscopic image" — same as the original; confirm it is intended.
        """
        nonlocal fallback_tokenizer
        if not isinstance(texts, dict):
            if fallback_tokenizer is None:
                from transformers import AutoTokenizer
                fallback_tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')
            texts = fallback_tokenizer("Endoscopic image", return_tensors='pt')
        return {k: v.to(config.device) for k, v in texts.items()}

    # Training loop.
    print("\n开始训练...")
    best_val_acc = 0.0

    for epoch in range(config.epochs):
        print(f"\nEpoch {epoch+1}/{config.epochs}")
        print("-"*40)

        # ---- Training ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0
        train_batches = 0  # batches actually run (the loop may break early)

        start_time = time.time()

        for batch_idx, batch in enumerate(train_loader):
            if batch_idx % 10 == 0:
                print(f"  Batch {batch_idx+1}/{len(train_loader)}")

            images, texts, labels = _unpack_batch(batch)
            images = images.to(config.device)
            texts = _texts_to_device(texts)
            labels = labels.to(config.device)

            optimizer.zero_grad()

            outputs = model(images=images, text_inputs=texts)
            loss = criterion(outputs, labels)

            loss.backward()
            optimizer.step()

            train_loss += loss.item()
            train_batches += 1
            _, predicted = outputs.max(1)
            train_total += labels.size(0)
            train_correct += predicted.eq(labels).sum().item()

            # Cap the per-epoch batch count for a quick test run.
            if batch_idx > 50:
                print(f"  限制batch数量到50个（快速测试）")
                break

        # Average over batches actually processed; the original divided by
        # len(train_loader) even after the early break, understating loss.
        # max(..., 1) also guards empty loaders.
        train_acc = 100. * train_correct / max(train_total, 1)
        train_time = time.time() - start_time

        print(f"\n训练结果:")
        print(f"  Loss: {train_loss/max(train_batches, 1):.4f}")
        print(f"  Acc: {train_acc:.2f}%")
        print(f"  时间: {train_time:.1f}秒")

        # ---- Validation ----
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0
        val_batches = 0

        with torch.no_grad():
            for batch_idx, batch in enumerate(val_loader):
                if batch_idx > 20:  # cap validation batches as well
                    break

                # Same batch handling as training (the original tuple-
                # unpacked here and would crash on dict-style batches).
                images, texts, labels = _unpack_batch(batch)
                images = images.to(config.device)
                texts = _texts_to_device(texts)
                labels = labels.to(config.device)

                outputs = model(images=images, text_inputs=texts)
                loss = criterion(outputs, labels)

                val_loss += loss.item()
                val_batches += 1
                _, predicted = outputs.max(1)
                val_total += labels.size(0)
                val_correct += predicted.eq(labels).sum().item()

        val_acc = 100. * val_correct / max(val_total, 1)

        print(f"\n验证结果:")
        print(f"  Loss: {val_loss/max(val_batches, 1):.4f}")
        print(f"  Acc: {val_acc:.2f}%")

        # Save the best checkpoint by validation accuracy.
        if val_acc > best_val_acc:
            best_val_acc = val_acc
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_acc': val_acc,
                'config': config_dict
            }, config.save_dir / "best_model.pth")
            print(f"  保存最佳模型 (Acc: {val_acc:.2f}%)")

        # Sanity warnings for the smoke test.
        print(f"\n性能评估:")
        print(f"  训练准确率: {train_acc:.2f}%")
        print(f"  验证准确率: {val_acc:.2f}%")

        if train_acc < 30:
            print(f"  ⚠️ 警告: 训练准确率太低 ({train_acc:.2f}%)")

        if val_acc < 20:
            print(f"  ⚠️ 警告: 验证准确率太低 ({val_acc:.2f}%)")

        if train_time > 300:  # over 5 minutes
            print(f"  ⚠️ 警告: 训练时间太长 ({train_time:.1f}秒)")
            print("  建议减少batch大小或模型复杂度")

        # Per-epoch checkpoint.
        torch.save({
            'epoch': epoch,
            'model_state_dict': model.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_acc': train_acc,
            'val_acc': val_acc,
            'config': config_dict
        }, config.save_dir / f"model_epoch_{epoch+1}.pth")

    print("\n训练完成!")
    print(f"最佳验证准确率: {best_val_acc:.2f}%")

if __name__ == "__main__":
    main()