#!/usr/bin/env python3
"""
修复版训练脚本 - 解决数据泄露和张量维度问题
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from sklearn.metrics import classification_report
from pathlib import Path
import time
from datetime import datetime
import random
from collections import defaultdict, Counter

# 获取uc_model目录路径
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform
from models.fusion_model import EndoMultimodalModel

class FixedConfig:
    """Configuration for the fixed (leak-free) training run.

    All settings are plain class attributes. They are read both from an
    instance (``config.batch_size`` in ``main``) and directly from the class
    (``FixedConfig.max_data_per_class`` in the split helper), so they must
    stay at class level.
    """
    # --- Data ---
    data_dir = r"D:\肠内镜数据库\UCEIS1-8"
    batch_size = 8  # kept small to avoid memory problems
    num_workers = 2
    max_data_per_class = 500  # cap per class to balance the dataset

    # --- Model ---
    image_pretrained = False  # skip pretrained weights for a quick start
    text_pretrained = 'bert-base-uncased'
    feature_dim = 256  # must match the fusion model's feature dimension
    num_classes = 8

    # --- Training ---
    epochs = 5  # more epochs, but with the per-class data cap above
    lr = 2e-4  # slightly raised learning rate
    weight_decay = 1e-5

    # --- Misc ---
    seed = 42
    save_dir = UC_MODEL_DIR / "outputs" / "fixed_training"
    device = 'cuda' if torch.cuda.is_available() else 'cpu'

def split_dataset_by_unique_files(data_path, val_ratio=0.2, seed=42, max_per_class=None):
    """Split image files into train/val sets by unique base filename.

    Grouping by stem guarantees that duplicate copies of the same image
    (same filename in different folders) never land on both sides of the
    split, which would leak validation data into training.

    Args:
        data_path: root ``Path`` searched recursively for ``*.bmp`` files.
        val_ratio: fraction of each class's file groups used for validation.
        seed: shuffle seed. NOTE: reseeds the global ``random`` module as a
            side effect.
        max_per_class: cap on file groups kept per class; ``None`` falls back
            to ``FixedConfig.max_data_per_class`` (the previous hard-coded
            behavior, kept for backward compatibility).

    Returns:
        Tuple ``(train_files, val_files)`` of ``Path`` lists.
    """
    if max_per_class is None:
        max_per_class = FixedConfig.max_data_per_class

    # Collect every image under the root.
    all_files = list(data_path.rglob("*.bmp"))

    # Group by base filename (no directory, no extension), e.g. "1分_001".
    file_groups = defaultdict(list)
    for file_path in all_files:
        file_groups[file_path.stem].append(file_path)

    print(f"找到 {len(all_files)} 个图像文件")
    print(f"唯一文件名组数: {len(file_groups)}")

    # Report stems that occur more than once — these are the leakage candidates.
    duplicates = {name: files for name, files in file_groups.items() if len(files) > 1}
    if duplicates:
        print(f"警告: 发现 {len(duplicates)} 个重复的基础文件名")
        for name, files in list(duplicates.items())[:5]:
            print(f"  {name}: {len(files)} 个文件")

    def extract_label_from_path(file_path):
        """Map a "<score>分" marker in the path to a 0-based class index."""
        path_str = str(file_path)
        for score in range(1, 9):
            if f"{score}分" in path_str:
                return score - 1  # classes are 0-7
        return 0  # fallback when no score marker is found

    # Attach a label to each group, taken from its first file. Duplicate
    # stems found in different score folders inherit the first file's label.
    labeled_groups = []
    for base_name, files in file_groups.items():
        labeled_groups.append({
            'base_name': base_name,
            'files': files,
            'label': extract_label_from_path(files[0])
        })

    # Bucket the groups per class for stratified sampling.
    label_groups = defaultdict(list)
    for group in labeled_groups:
        label_groups[group['label']].append(group)

    train_files = []
    val_files = []

    random.seed(seed)

    for label, groups in label_groups.items():
        random.shuffle(groups)

        # Cap the number of groups per class to keep the dataset balanced.
        if len(groups) > max_per_class:
            groups = groups[:max_per_class]
            print(f"标签 {label+1}: 限制数据量到 {max_per_class} 个文件组")

        # Whole groups go to one side; a group is never split across train/val.
        split_idx = int(len(groups) * (1 - val_ratio))
        train_groups = groups[:split_idx]
        val_groups = groups[split_idx:]

        for group in train_groups:
            train_files.extend(group['files'])
        for group in val_groups:
            val_files.extend(group['files'])

        print(f"标签 {label+1}: 训练组 {len(train_groups)} 个, 验证组 {len(val_groups)} 个")

    print(f"\n最终分割:")
    print(f"训练文件: {len(train_files)}")
    print(f"验证文件: {len(val_files)}")

    # Report the per-class distribution of both splits.
    train_labels = [extract_label_from_path(f) for f in train_files]
    val_labels = [extract_label_from_path(f) for f in val_files]

    print("\n训练集分布:")
    train_dist = Counter(train_labels)
    for label in sorted(train_dist.keys()):
        print(f"  {label+1}分: {train_dist[label]} 张")

    print("\n验证集分布:")
    val_dist = Counter(val_labels)
    for label in sorted(val_dist.keys()):
        print(f"  {label+1}分: {val_dist[label]} 张")

    # Defensive check: grouping by stem makes overlap impossible by
    # construction, but verify anyway and retry with fresh parameters if the
    # invariant is ever violated.
    train_base_names = {f.stem for f in train_files}
    val_base_names = {f.stem for f in val_files}
    overlap = train_base_names.intersection(val_base_names)

    if overlap:
        print(f"\n[WARNING] 发现 {len(overlap)} 个基础文件名同时存在于训练集和验证集!")
        print("这会导致数据泄露!")
        # FIX: forward max_per_class so the retry honors the caller's cap.
        return split_dataset_by_unique_files(
            data_path, val_ratio=0.15, seed=seed + 1, max_per_class=max_per_class
        )
    else:
        print("\n[OK] 数据分割正确，没有数据泄露")

    return train_files, val_files

# ---------------------------------------------------------------------------
# Private helpers for main()
# ---------------------------------------------------------------------------

# Memo for loaded tokenizers, keyed by pretrained-model name.
_TOKENIZER_CACHE = {}


def _get_tokenizer(pretrained_name):
    """Load the HuggingFace tokenizer once and memoize it.

    FIX: the original code re-created the tokenizer from scratch inside the
    batch loop every time the fallback text path was taken, which is slow.
    """
    if pretrained_name not in _TOKENIZER_CACHE:
        from transformers import AutoTokenizer  # lazy: only needed on fallback
        _TOKENIZER_CACHE[pretrained_name] = AutoTokenizer.from_pretrained(pretrained_name)
    return _TOKENIZER_CACHE[pretrained_name]


def _default_text_inputs(batch_size, config):
    """Tokenize a generic placeholder caption for every image in the batch."""
    encoded = _get_tokenizer(config.text_pretrained)(
        ["Endoscopic image"] * batch_size,
        return_tensors='pt',
        padding=True,
        truncation=True,
        max_length=128,
    )
    return {k: v.to(config.device) for k, v in encoded.items()}


def _prepare_text_inputs(texts, batch_size, config):
    """Move tokenized text to the device, repairing batch-dimension mismatches.

    Falls back to the default caption when *texts* is not a dict (FIX: the
    original validation loop silently skipped this fallback that the train
    loop had — train and val now share the same handling) or when a tensor's
    batch size cannot be fixed by repeating.
    """
    if not isinstance(texts, dict):
        return _default_text_inputs(batch_size, config)
    texts = {k: v.to(config.device) for k, v in texts.items()}
    for key, value in texts.items():
        if value.size(0) == batch_size:
            continue
        repeat_times = batch_size // value.size(0)
        if value.size(0) == 1 and repeat_times > 1:
            # Single shared text: tile it across the batch.
            # NOTE(review): assumes the tensor is 2-D (batch, seq) — confirm.
            texts[key] = value.repeat(repeat_times, 1)
        else:
            # Irreparable mismatch: replace the whole dict with default text.
            return _default_text_inputs(batch_size, config)
    return texts


def _unpack_batch(batch):
    """Support both (images, texts, labels) tuples and dict-style batches."""
    if isinstance(batch, (list, tuple)) and len(batch) == 3:
        return batch
    return batch['images'], batch['texts'], batch['labels']


def _extract_logits(outputs):
    """Pull the classification logits out of a tensor or dict model output."""
    if isinstance(outputs, dict):
        if 'logits' in outputs:
            return outputs['logits']
        return next(iter(outputs.values()))  # fall back to the first value
    return outputs


def _extract_label_from_path(file_path):
    """Map a "<score>分" marker in the file path to a 0-based class index."""
    path_str = str(file_path)
    for score in range(1, 9):
        if f"{score}分" in path_str:
            return score - 1
    return 0  # fallback when no score marker is found


def _run_epoch(model, loader, criterion, config, optimizer=None):
    """Run one full pass over *loader*.

    Trains when *optimizer* is given, otherwise evaluates without gradients.
    Returns ``(mean_loss, accuracy_percent, weighted_f1)``.
    """
    from sklearn.metrics import f1_score  # local import mirrors original style

    training = optimizer is not None
    if training:
        model.train()
    else:
        model.eval()

    total_loss = 0.0
    correct = 0
    seen = 0
    preds = []
    targets = []

    with torch.set_grad_enabled(training):
        for batch_idx, batch in enumerate(loader):
            images, texts, labels = _unpack_batch(batch)
            images = images.to(config.device)
            labels = labels.to(config.device)
            texts = _prepare_text_inputs(texts, images.size(0), config)

            if training:
                optimizer.zero_grad()

            logits = _extract_logits(model(images=images, text_inputs=texts))
            loss = criterion(logits, labels)

            if training:
                loss.backward()
                # Clip gradients to guard against explosion.
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)
                optimizer.step()

            total_loss += loss.item()
            _, predicted = logits.max(1)
            seen += labels.size(0)
            correct += predicted.eq(labels).sum().item()
            preds.extend(predicted.cpu().numpy())
            targets.extend(labels.cpu().numpy())

            if training and batch_idx % 20 == 0:
                print(f"  Batch {batch_idx+1}/{len(loader)}, Loss: {loss.item():.4f}")

    accuracy = 100. * correct / seen
    f1 = f1_score(targets, preds, average='weighted')
    return total_loss / len(loader), accuracy, f1


def main():
    """Entry point: split data without leakage, build the multimodal model,
    train with class-balanced loss, and checkpoint the best F1 epoch."""
    print("="*60)
    print("修复版训练开始")
    print("="*60)

    config = FixedConfig()

    # Create the output directory.
    config.save_dir.mkdir(parents=True, exist_ok=True)

    # Seed every RNG we use, for reproducibility.
    torch.manual_seed(config.seed)
    random.seed(config.seed)
    np.random.seed(config.seed)

    print(f"设备: {config.device}")
    print(f"数据目录: {config.data_dir}")

    data_path = Path(config.data_dir)
    if not data_path.exists():
        print(f"错误: 数据目录不存在 - {config.data_dir}")
        return

    # Leak-free split by unique base filename.
    print("\n修复数据分割...")
    train_files, val_files = split_dataset_by_unique_files(data_path, val_ratio=0.2)

    print("\n创建数据集...")
    train_dataset = UCEISDataset(
        file_list=train_files,
        transform=train_transform,
        mode='train'
    )
    val_dataset = UCEISDataset(
        file_list=val_files,
        transform=val_transform,
        mode='val'
    )

    print(f"训练集样本数: {len(train_dataset)}")
    print(f"验证集样本数: {len(val_dataset)}")

    train_loader = DataLoader(
        train_dataset,
        batch_size=config.batch_size,
        shuffle=True,
        num_workers=config.num_workers,
        pin_memory=True,
        drop_last=True  # avoid a ragged final batch
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=config.batch_size,
        shuffle=False,
        num_workers=config.num_workers,
        pin_memory=True
    )

    print("\n创建模型...")
    model = EndoMultimodalModel(
        image_pretrained=config.image_pretrained,
        text_pretrained=config.text_pretrained,
        feature_dim=config.feature_dim,
        num_classes=config.num_classes
    ).to(config.device)

    param_count = sum(p.numel() for p in model.parameters())
    trainable_param_count = sum(p.numel() for p in model.parameters() if p.requires_grad)
    print(f"模型参数量: {param_count:,}")
    print(f"可训练参数: {trainable_param_count:,}")

    optimizer = optim.AdamW(
        model.parameters(),
        lr=config.lr,
        weight_decay=config.weight_decay
    )

    # Class-balanced loss weights.
    # FIX: derive labels from the file paths instead of iterating the dataset.
    # The old code read item[1] from each sample — the *text* element of an
    # (image, text, label) tuple, not the label — and eagerly decoded every
    # image just to count classes.
    # NOTE(review): assumes UCEISDataset labels match the "<score>分" path
    # markers — verify against utils/dataset.py.
    train_labels = [_extract_label_from_path(f) for f in train_files]
    label_counts = Counter(train_labels)
    total = len(train_labels)
    # max(count, 1) guards against division by zero for an absent class.
    class_weights = [
        total / (config.num_classes * max(label_counts[i], 1))
        for i in range(config.num_classes)
    ]
    class_weights = torch.FloatTensor(class_weights).to(config.device)
    criterion = nn.CrossEntropyLoss(weight=class_weights)

    print("\n开始训练...")
    best_val_acc = 0.0
    best_val_f1 = 0.0

    # FIX: snapshot the class-level settings for the checkpoint.
    # ``config.__dict__`` on a FixedConfig instance is empty because every
    # setting is a class attribute, so the original saved nothing.
    config_snapshot = {
        k: v for k, v in vars(FixedConfig).items() if not k.startswith('_')
    }

    for epoch in range(config.epochs):
        print(f"\nEpoch {epoch+1}/{config.epochs}")
        print("-"*40)

        # --- Train ---
        start_time = time.time()
        train_loss, train_acc, train_f1 = _run_epoch(
            model, train_loader, criterion, config, optimizer
        )
        train_time = time.time() - start_time

        print(f"\n训练结果:")
        print(f"  Loss: {train_loss:.4f}")
        print(f"  Acc: {train_acc:.2f}%")
        print(f"  F1: {train_f1:.4f}")
        print(f"  时间: {train_time:.1f}秒")

        # --- Validate ---
        val_loss, val_acc, val_f1 = _run_epoch(model, val_loader, criterion, config)

        print(f"\n验证结果:")
        print(f"  Loss: {val_loss:.4f}")
        print(f"  Acc: {val_acc:.2f}%")
        print(f"  F1: {val_f1:.4f}")

        # Keep the checkpoint with the best weighted F1.
        if val_f1 > best_val_f1:
            best_val_f1 = val_f1
            best_val_acc = val_acc
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'val_acc': val_acc,
                'val_f1': val_f1,
                'train_acc': train_acc,
                'train_f1': train_f1,
                'config': config_snapshot,
                'class_weights': class_weights.cpu().numpy()
            }, config.save_dir / "best_model.pth")
            print(f"  ✅ 保存最佳模型 (F1: {val_f1:.4f})")

        # --- Heuristic health checks ---
        print(f"\n性能评估:")
        print(f"  训练准确率: {train_acc:.2f}%")
        print(f"  验证准确率: {val_acc:.2f}%")

        if train_acc - val_acc > 30:
            print(f"  ⚠️ 警告: 可能过拟合 (差距: {train_acc - val_acc:.2f}%)")

        if val_acc < 30:
            print(f"  ⚠️ 警告: 验证准确率较低 ({val_acc:.2f}%)")

        if train_time > 300:
            print(f"  ⚠️ 警告: 训练时间较长 ({train_time:.1f}秒)")

    print("\n训练完成!")
    print(f"最佳验证准确率: {best_val_acc:.2f}%")
    print(f"最佳F1分数: {best_val_f1:.4f}")

# Run the training entry point only when executed as a script.
if __name__ == "__main__":
    main()