#!/usr/bin/env python3
"""
优化版训练脚本 - 提高训练速度
"""
import os
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from pathlib import Path
import time
import random
from collections import defaultdict, Counter
from datetime import datetime

# Resolve the uc_model directory and make its local packages importable
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform
from models.fusion_model import EndoMultimodalModel
from transformers import BertTokenizer

# Configuration parameters
config = {
    'data_dir': r"D:\肠内镜数据库\UCEIS1-8",  # dataset root (per-score subfolders)
    'batch_size': 32,  # larger batch size for higher throughput
    'epochs': 5,
    'lr': 2e-4,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'seed': 42,
    'save_interval': 1,  # checkpoint every N epochs
    'print_interval': 10,  # log every N batches (reduced print frequency)
}

# Seed all RNGs (torch / random / numpy) for reproducibility
torch.manual_seed(config['seed'])
random.seed(config['seed'])
np.random.seed(config['seed'])

# Pre-load the tokenizer once at module level (avoids reloading per batch)
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# NOTE(review): default_texts appears unused — collate_fn builds its own
# fallback captions ("Endoscopic image"); confirm before removing.
default_texts = ["Endoscopic image showing ulcerative colitis"] * config['batch_size']

def log_info(msg):
    """Log a message to stdout.

    Uses ``flush=True`` so progress lines appear immediately even when
    stdout is redirected to a file during long training runs — with
    plain ``print`` the output can sit in the block buffer for minutes.
    """
    print(msg, flush=True)

def collate_fn(batch):
    """Assemble a batch for the multimodal model.

    Returns a tuple ``(images, text_inputs, labels)`` where images and
    labels are stacked tensors and ``text_inputs`` is the tokenizer's
    padded/truncated encoding (``return_tensors='pt'``).

    To keep tokenization cheap, only the first five samples keep their
    own caption; the remainder fall back to a short default string.
    """
    images, texts, labels = zip(*batch)

    image_batch = torch.stack(images)
    label_batch = torch.stack(labels)

    captions = list(texts)
    keep = 5
    if len(captions) > keep:
        # Real captions for the first few samples, default text for the rest.
        captions = captions[:keep] + ["Endoscopic image"] * (len(captions) - keep)

    # Uses the tokenizer pre-loaded at module import time.
    encoded = tokenizer(
        captions,
        padding=True,
        truncation=True,
        max_length=64,  # short max_length keeps tokenization fast
        return_tensors='pt',
    )

    return image_batch, encoded, label_batch

def main():
    """End-to-end UCEIS training: discover images, build a stratified
    train/val split, create datasets and loaders, train the multimodal
    model, and checkpoint each epoch (tracking the best accuracy).
    """
    log_info("="*60)
    log_info("UCEIS模型训练 - 优化版")
    log_info("="*60)
    log_info(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    log_info(f"设备: {config['device']}")
    log_info(f"批次大小: {config['batch_size']}")
    log_info(f"学习率: {config['lr']}")

    # 1. Load data: gather every .bmp image under the dataset root.
    log_info("\n[步骤1] 加载数据...")
    data_path = Path(config['data_dir'])
    all_files = list(data_path.rglob("*.bmp"))
    log_info(f"总图像数: {len(all_files)}")

    # Derive the class label from the file path.
    def extract_label(file_path):
        """Return the 0-based class index parsed from a "<score>分"
        marker (scores 1-8) in the path; falls back to 0 when absent.
        """
        path_str = str(file_path)
        for score in range(1, 9):
            if f"{score}分" in path_str:
                return score - 1
        return 0

    # Pair every file with its label.
    file_labels = [(f, extract_label(f)) for f in all_files]

    # Group files by label for a stratified split.
    label_groups = defaultdict(list)
    for f, label in file_labels:
        label_groups[label].append(f)

    log_info("\n标签分布:")
    for label in sorted(label_groups.keys()):
        log_info(f"  {label+1}分: {len(label_groups[label])} 张")

    # Stratified 80/20 train/val split, performed per label group.
    train_files = []
    val_files = []

    random.seed(config['seed'])  # re-seed so the split itself is reproducible
    for label, files in label_groups.items():
        random.shuffle(files)
        split_idx = int(len(files) * 0.8)
        train_files.extend(files[:split_idx])
        val_files.extend(files[split_idx:])

    log_info(f"\n数据分割:")
    log_info(f"训练文件: {len(train_files)}")
    log_info(f"验证文件: {len(val_files)}")

    # 2. Build datasets.
    log_info("\n[步骤2] 创建数据集...")
    train_dataset = UCEISDataset(
        file_list=train_files,
        transform=train_transform,
        mode='train'
    )

    val_dataset = UCEISDataset(
        file_list=val_files,
        transform=val_transform,
        mode='val'
    )

    log_info(f"训练集样本数: {len(train_dataset)}")
    log_info(f"验证集样本数: {len(val_dataset)}")

    # 3. Build dataloaders (using the optimized collate_fn above).
    log_info("\n[步骤3] 创建数据加载器...")
    train_loader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        shuffle=True,
        num_workers=0,  # 0 on Windows (avoids multiprocessing issues)
        pin_memory=True,
        collate_fn=collate_fn
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        shuffle=False,
        num_workers=0,
        pin_memory=True,
        collate_fn=collate_fn
    )

    log_info(f"训练批次数: {len(train_loader)}")
    log_info(f"验证批次数: {len(val_loader)}")

    # 4. Build the multimodal (image + text) model.
    log_info("\n[步骤4] 创建模型...")
    model = EndoMultimodalModel(
        image_pretrained=False,
        text_pretrained='bert-base-uncased',
        feature_dim=256,
        num_classes=8
    ).to(config['device'])

    param_count = sum(p.numel() for p in model.parameters())
    log_info(f"模型参数量: {param_count:,}")

    # 5. Optimizer and loss function.
    log_info("\n[步骤5] 设置优化器和损失函数...")
    optimizer = optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-5)
    criterion = nn.CrossEntropyLoss()

    # 6. Training loop.
    log_info("\n[步骤6] 开始训练...")
    best_val_acc = 0.0
    train_start_time = time.time()

    for epoch in range(config['epochs']):
        epoch_start_time = time.time()
        log_info(f"\n{'='*50}")
        log_info(f"Epoch {epoch+1}/{config['epochs']}")
        log_info(f"{'='*50}")

        # --- Training phase ---
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        for batch_idx, batch in enumerate(train_loader):
            batch_start_time = time.time()

            # Move batch tensors to the target device.
            images, texts, labels = batch
            images = images.to(config['device'])
            labels = labels.to(config['device'])

            # Text was already tokenized in collate_fn; just move tensors.
            texts = {k: v.to(config['device']) for k, v in texts.items()}

            # Forward pass.
            optimizer.zero_grad()
            outputs = model(images=images, text_inputs=texts)

            # The model may return either a dict or raw logits.
            if isinstance(outputs, dict):
                logits = outputs.get('logits', list(outputs.values())[0])
            else:
                logits = outputs

            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            # Running statistics.
            train_loss += loss.item()
            _, predicted = logits.max(1)
            train_total += labels.size(0)
            train_correct += predicted.eq(labels).sum().item()

            # Progress logging every `print_interval` batches.
            if batch_idx % config['print_interval'] == 0:
                current_acc = 100. * train_correct / train_total
                batch_time = time.time() - batch_start_time
                # NOTE(review): ETA extrapolates from a single batch's
                # wall time, so it can be noisy.
                eta = batch_time * (len(train_loader) - batch_idx - 1) / 60

                log_info(f"  Batch {batch_idx+1}/{len(train_loader)} | "
                        f"Loss: {loss.item():.4f} | "
                        f"Acc: {current_acc:.2f}% | "
                        f"Time: {batch_time:.2f}s | "
                        f"ETA: {eta:.1f}min")

        # Epoch-level training metrics.
        train_acc = 100. * train_correct / train_total
        train_time = time.time() - epoch_start_time

        log_info(f"\n训练完成:")
        log_info(f"  平均Loss: {train_loss/len(train_loader):.4f}")
        log_info(f"  准确率: {train_acc:.2f}%")
        log_info(f"  用时: {train_time/60:.2f}分钟")

        # --- Validation phase (only run for the first two epochs) ---
        if epoch < 2:
            log_info("\n[验证阶段]")
            model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                for batch_idx, batch in enumerate(val_loader):
                    images, texts, labels = batch
                    images = images.to(config['device'])
                    labels = labels.to(config['device'])
                    texts = {k: v.to(config['device']) for k, v in texts.items()}

                    outputs = model(images=images, text_inputs=texts)

                    if isinstance(outputs, dict):
                        logits = outputs.get('logits', list(outputs.values())[0])
                    else:
                        logits = outputs

                    loss = criterion(logits, labels)
                    val_loss += loss.item()
                    _, predicted = logits.max(1)
                    val_total += labels.size(0)
                    val_correct += predicted.eq(labels).sum().item()

            val_acc = 100. * val_correct / val_total

            log_info(f"\n验证结果:")
            log_info(f"  平均Loss: {val_loss/len(val_loader):.4f}")
            log_info(f"  准确率: {val_acc:.2f}%")
        else:
            # NOTE(review): validation is skipped from epoch 3 onward and
            # train accuracy stands in for val accuracy — "best model"
            # selection below may then favor an overfit epoch; confirm
            # this trade-off is intended.
            val_acc = train_acc  # skip validation to save time

        # Periodic checkpointing.
        if (epoch + 1) % config['save_interval'] == 0:
            save_dir = UC_MODEL_DIR / "outputs" / "fast_training"
            save_dir.mkdir(parents=True, exist_ok=True)

            save_path = save_dir / f"model_epoch_{epoch+1}.pth"
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_acc': train_acc,
                'val_acc': val_acc,
                'config': config
            }, save_path)

            log_info(f"\n模型已保存: {save_path}")

            # Track the best model by (possibly proxied) validation accuracy.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                best_path = save_dir / "best_model.pth"
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'train_acc': train_acc,
                    'val_acc': val_acc,
                    'best_val_acc': best_val_acc,
                    'config': config
                }, best_path)
                log_info(f"✅ 最佳模型已更新! (准确率: {best_val_acc:.2f}%)")

        # Early stop if training accuracy is clearly too low after warm-up.
        if train_acc < 30 and epoch >= 2:
            log_info("\n⚠️ 训练准确率过低，停止训练")
            break

    # Training finished — print the summary.
    total_training_time = time.time() - train_start_time
    log_info("\n" + "="*60)
    log_info("训练完成!")
    log_info(f"总用时: {total_training_time/60:.2f}分钟")
    log_info(f"最佳验证准确率: {best_val_acc:.2f}%")
    log_info(f"结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    log_info("="*60)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        log_info("\n\n⚠️ 训练被用户中断!")
    except Exception as e:
        log_info(f"\n❌ 训练出错: {str(e)}")
        raise