#!/usr/bin/env python3
"""
Ultra-fast training script - optimized for speed
"""
import os
# Disable TensorFlow's oneDNN optimizations via env var.
# NOTE(review): no TensorFlow import is visible in this file — presumably
# something imported downstream pulls TF in; confirm this is still needed.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from pathlib import Path
import time
import random
from collections import defaultdict, Counter
from datetime import datetime

# Resolve the uc_model directory path so local packages import cleanly
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform

# Configuration parameters
config = {
    'data_dir': r"D:\肠内镜数据库\UCEIS1-8",
    'batch_size': 64,  # large batch size for throughput
    'epochs': 5,
    'lr': 1e-3,  # raised learning rate to pair with the large batch
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'seed': 42,
    'save_interval': 1,  # checkpoint every epoch
    'print_interval': 20,  # progress line every N batches
}

# Seed the RNGs for reproducibility.
# NOTE(review): torch.cuda.manual_seed_all is not called, so CUDA-side
# randomness may still vary between runs — confirm whether that matters.
torch.manual_seed(config['seed'])
random.seed(config['seed'])
np.random.seed(config['seed'])

# Define the simplified model
class SimpleUCEISModel(nn.Module):
    """Simplified UCEIS scorer that classifies from the image alone.

    Wraps a ResNet-18 backbone fetched through torch.hub (randomly
    initialized, no pretrained weights) and swaps its final fully
    connected layer for a small dropout-regularized classification head.
    """

    def __init__(self, num_classes=8):
        super().__init__()
        # Fetch the ResNet-18 architecture from the torchvision hub repo.
        backbone = torch.hub.load('pytorch/vision:v0.10.0', 'resnet18', pretrained=False)
        in_features = backbone.fc.in_features
        # Replace the stock 1000-way classifier with our own head.
        head = nn.Sequential(
            nn.Linear(in_features, 256),
            nn.ReLU(inplace=True),
            nn.Dropout(0.5),
            nn.Linear(256, num_classes),
        )
        backbone.fc = head
        self.backbone = backbone

    def forward(self, x):
        # The re-headed backbone does all the work.
        return self.backbone(x)

def collate_fn(batch):
    """Collate (image, text, label) samples into two stacked tensors.

    The middle (text) element of each sample is discarded — this fast
    model consumes images only.
    """
    images = torch.stack([sample[0] for sample in batch])
    labels = torch.stack([sample[2] for sample in batch])
    return images, labels

def main():
    """Run the full fast-training pipeline.

    Steps: collect .bmp files and parse labels from their paths, make a
    stratified 80/20 train/val split, build datasets and loaders, train
    SimpleUCEISModel for config['epochs'] epochs, and checkpoint the
    model (tracking the best by validation accuracy).
    """
    print("="*60)
    print("UCEIS模型训练 - 极速版")
    print("="*60)
    print(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print(f"设备: {config['device']}")
    print(f"批次大小: {config['batch_size']}")
    print(f"学习率: {config['lr']}")

    # 1. Load data: recursively collect every .bmp under the data dir.
    print("\n[步骤1] 加载数据...")
    data_path = Path(config['data_dir'])
    all_files = list(data_path.rglob("*.bmp"))
    print(f"总图像数: {len(all_files)}")

    # Derive the 0-based class label from the "<score>分" token in the
    # file path (scores 1..8).
    def extract_label(file_path):
        """Return label 0..7 parsed from the path; 0 if no token found."""
        path_str = str(file_path)
        for score in range(1, 9):
            if f"{score}分" in path_str:
                return score - 1
        return 0

    # Pair each file with its label.
    file_labels = [(f, extract_label(f)) for f in all_files]

    # Group files by label so the split can be stratified per class.
    label_groups = defaultdict(list)
    for f, label in file_labels:
        label_groups[label].append(f)

    print("\n标签分布:")
    for label in sorted(label_groups.keys()):
        print(f"  {label+1}分: {len(label_groups[label])} 张")

    # Stratified 80/20 split: shuffle each class, take the first 80%
    # for training and the remainder for validation.
    train_files = []
    val_files = []

    random.seed(config['seed'])
    for label, files in label_groups.items():
        random.shuffle(files)
        split_idx = int(len(files) * 0.8)
        train_files.extend(files[:split_idx])
        val_files.extend(files[split_idx:])

    print(f"\n数据分割:")
    print(f"训练文件: {len(train_files)}")
    print(f"验证文件: {len(val_files)}")

    # 2. Build the datasets.
    print("\n[步骤2] 创建数据集...")
    train_dataset = UCEISDataset(
        file_list=train_files,
        transform=train_transform,
        mode='train'
    )

    val_dataset = UCEISDataset(
        file_list=val_files,
        transform=val_transform,
        mode='val'
    )

    print(f"训练集样本数: {len(train_dataset)}")
    print(f"验证集样本数: {len(val_dataset)}")

    # 3. Build the data loaders.
    print("\n[步骤3] 创建数据加载器...")
    train_loader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        shuffle=True,
        num_workers=0,  # 0 on Windows to avoid worker-spawn issues
        pin_memory=True,
        collate_fn=collate_fn
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        shuffle=False,
        num_workers=0,
        pin_memory=True,
        collate_fn=collate_fn
    )

    print(f"训练批次数: {len(train_loader)}")
    print(f"验证批次数: {len(val_loader)}")

    # 4. Build the model on the configured device.
    print("\n[步骤4] 创建模型...")
    model = SimpleUCEISModel(num_classes=8).to(config['device'])

    param_count = sum(p.numel() for p in model.parameters())
    print(f"模型参数量: {param_count:,}")

    # 5. Optimizer and loss function.
    print("\n[步骤5] 设置优化器和损失函数...")
    optimizer = optim.Adam(model.parameters(), lr=config['lr'])
    criterion = nn.CrossEntropyLoss()

    # 6. Training loop.
    print("\n[步骤6] 开始训练...")
    best_val_acc = 0.0
    train_start_time = time.time()

    for epoch in range(config['epochs']):
        epoch_start_time = time.time()
        print(f"\n{'='*50}")
        print(f"Epoch {epoch+1}/{config['epochs']}")
        print(f"{'='*50}")

        # ---- Training phase ----
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0

        for batch_idx, (images, labels) in enumerate(train_loader):
            batch_start_time = time.time()

            images = images.to(config['device'])
            labels = labels.to(config['device'])

            # Forward + backward + parameter update.
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()

            # Accumulate running loss/accuracy statistics.
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            train_total += labels.size(0)
            train_correct += predicted.eq(labels).sum().item()

            # Periodic progress report.
            if batch_idx % config['print_interval'] == 0:
                current_acc = 100. * train_correct / train_total
                batch_time = time.time() - batch_start_time
                # NOTE(review): ETA extrapolates from this single batch's
                # wall time, so the estimate is noisy.
                eta = batch_time * (len(train_loader) - batch_idx - 1) / 60

                print(f"  Batch {batch_idx+1}/{len(train_loader)} | "
                        f"Loss: {loss.item():.4f} | "
                        f"Acc: {current_acc:.2f}% | "
                        f"Time: {batch_time:.2f}s | "
                        f"ETA: {eta:.1f}min")

        # Epoch training summary.
        train_acc = 100. * train_correct / train_total
        train_time = time.time() - epoch_start_time

        print(f"\n训练完成:")
        print(f"  平均Loss: {train_loss/len(train_loader):.4f}")
        print(f"  准确率: {train_acc:.2f}%")
        print(f"  用时: {train_time/60:.2f}分钟")

        # Validation runs only on the last two epochs (speed trade-off).
        if epoch >= config['epochs'] - 2:
            print("\n[验证阶段]")
            model.eval()
            val_loss = 0.0
            val_correct = 0
            val_total = 0

            with torch.no_grad():
                for images, labels in val_loader:
                    images = images.to(config['device'])
                    labels = labels.to(config['device'])

                    outputs = model(images)
                    loss = criterion(outputs, labels)
                    val_loss += loss.item()
                    _, predicted = outputs.max(1)
                    val_total += labels.size(0)
                    val_correct += predicted.eq(labels).sum().item()

            val_acc = 100. * val_correct / val_total
            print(f"\n验证结果:")
            print(f"  平均Loss: {val_loss/len(val_loader):.4f}")
            print(f"  准确率: {val_acc:.2f}%")
        else:
            # Validation skipped on early epochs: reuse train accuracy.
            # NOTE(review): this lets the "best model" selection below be
            # driven by TRAINING accuracy on those epochs — confirm this
            # is intended.
            val_acc = train_acc

        # ---- Checkpointing ----
        if (epoch + 1) % config['save_interval'] == 0:
            save_dir = UC_MODEL_DIR / "outputs" / "ultra_fast_training"
            save_dir.mkdir(parents=True, exist_ok=True)

            save_path = save_dir / f"model_epoch_{epoch+1}.pth"
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_acc': train_acc,
                'val_acc': val_acc,
                'config': config
            }, save_path)

            print(f"\n模型已保存: {save_path}")

            # Track and save the best model so far.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                best_path = save_dir / "best_model.pth"
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'train_acc': train_acc,
                    'val_acc': val_acc,
                    'best_val_acc': best_val_acc,
                    'config': config
                }, best_path)
                print(f"✅ 最佳模型已更新! (准确率: {best_val_acc:.2f}%)")

        # Coarse console hints based on training accuracy.
        if train_acc > 80 and epoch >= 3:
            print("\n✅ 训练效果良好，继续训练...")
        elif train_acc < 30 and epoch >= 1:
            print("\n⚠️ 训练准确率较低，建议调整超参数")

    # Training finished — print totals.
    total_training_time = time.time() - train_start_time
    print("\n" + "="*60)
    print("训练完成!")
    print(f"总用时: {total_training_time/60:.2f}分钟")
    print(f"最佳验证准确率: {best_val_acc:.2f}%")
    print(f"结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    print("="*60)

if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        print("\n\n⚠️ 训练被用户中断!")
    except Exception as e:
        print(f"\n❌ 训练出错: {str(e)}")
        raise