#!/usr/bin/env python3
"""
简化训练脚本 - 直接运行，输出到控制台
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from pathlib import Path
import time
import random
from collections import defaultdict, Counter

# Silence oneDNN informational logs from any TensorFlow pulled in by dependencies
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

# Make the uc_model directory importable regardless of the current working directory
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform
from models.fusion_model import EndoMultimodalModel

# Configuration
data_dir = r"D:\肠内镜数据库\UCEIS1-8"  # root folder of the UCEIS 1-8 endoscopy image set
batch_size = 8
epochs = 3
lr = 2e-4
device = 'cuda' if torch.cuda.is_available() else 'cpu'
seed = 42

# Seed both torch and Python RNGs for reproducibility
torch.manual_seed(seed)
random.seed(seed)

print("="*60)
print("简化训练开始")
print("="*60)
print(f"设备: {device}")
print(f"数据目录: {data_dir}")

# Recursively collect all .bmp images under the data directory
data_path = Path(data_dir)
all_files = list(data_path.rglob("*.bmp"))
print(f"\n总图像数: {len(all_files)}")

# 提取标签并分组
def extract_label(file_path):
    """Derive a zero-based class index (0-7) from a file path.

    Scans the path string for a UCEIS score marker of the form "<n>分"
    with n in 1..8 (lowest n wins) and maps it to n-1. Falls back to 0
    when no marker is present.
    """
    path_text = str(file_path)
    return next(
        (score - 1 for score in range(1, 9) if f"{score}分" in path_text),
        0,
    )

# Pair every file with its extracted label
file_labels = [(f, extract_label(f)) for f in all_files]

# Group files by label so the split can be stratified per class
label_groups = defaultdict(list)
for f, label in file_labels:
    label_groups[label].append(f)

print(f"\n找到的标签分布:")
for label in sorted(label_groups.keys()):
    print(f"  {label+1}分: {len(label_groups[label])} 张")

# Stratified 80/20 train/validation split, per label
train_files = []
val_files = []

random.seed(seed)  # re-seed so the shuffle order is deterministic across runs
for label, files in label_groups.items():
    random.shuffle(files)
    split_idx = int(len(files) * 0.8)
    train_files.extend(files[:split_idx])
    val_files.extend(files[split_idx:])

print(f"\n数据分割:")
print(f"训练文件: {len(train_files)}")
print(f"验证文件: {len(val_files)}")

# Datasets (project-local UCEISDataset; transforms come from utils.dataset)
train_dataset = UCEISDataset(
    file_list=train_files,
    transform=train_transform,
    mode='train'
)

val_dataset = UCEISDataset(
    file_list=val_files,
    transform=val_transform,
    mode='val'
)

print(f"\n数据集大小:")
print(f"训练集: {len(train_dataset)}")
print(f"验证集: {len(val_dataset)}")

# Data loaders
train_loader = DataLoader(
    train_dataset,
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,  # single-process loading avoids multiprocessing issues (esp. on Windows)
    pin_memory=True
)

val_loader = DataLoader(
    val_dataset,
    batch_size=batch_size,
    shuffle=False,
    num_workers=0,
    pin_memory=True
)

# Build the multimodal (image + text) classification model
print("\n创建模型...")
model = EndoMultimodalModel(
    image_pretrained=False,
    text_pretrained='bert-base-uncased',
    feature_dim=256,
    num_classes=8
).to(device)

param_count = sum(p.numel() for p in model.parameters())
print(f"模型参数量: {param_count:,}")

# Optimizer and loss function
optimizer = optim.AdamW(model.parameters(), lr=lr)
criterion = nn.CrossEntropyLoss()

# Training loop
print("\n开始训练...")

# Load the tokenizer ONCE up front. The original code re-ran
# AutoTokenizer.from_pretrained(...) inside the batch loop (train AND val),
# reloading vocab files from disk on every single batch.
from transformers import AutoTokenizer
tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')


def _unpack_batch(batch):
    """Return (images, texts, labels) from either a 3-tuple or a dict batch."""
    if isinstance(batch, (list, tuple)) and len(batch) == 3:
        return batch
    return batch['images'], batch['texts'], batch['labels']


def _tokenize_texts(texts):
    """Tokenize raw text (str or sequence of str) into BERT input tensors.

    Already-tokenized input (e.g. a dict of tensors) is passed through
    unchanged, matching the original fall-through behavior.
    """
    if isinstance(texts, (tuple, list)):
        return tokenizer(
            list(texts),
            padding=True,
            truncation=True,
            max_length=128,
            return_tensors='pt'
        )
    if isinstance(texts, str):
        return tokenizer(
            texts,
            padding=True,
            truncation=True,
            max_length=128,
            return_tensors='pt'
        )
    return texts


def _extract_logits(outputs):
    """Pull classification logits out of a model output (dict or raw tensor)."""
    if isinstance(outputs, dict):
        return outputs.get('logits', list(outputs.values())[0])
    return outputs


for epoch in range(epochs):
    print(f"\nEpoch {epoch+1}/{epochs}")
    print("-"*40)

    # ---- Training phase ----
    model.train()
    train_loss = 0.0
    train_correct = 0
    train_total = 0

    start_time = time.time()

    for batch_idx, batch in enumerate(train_loader):
        images, texts, labels = _unpack_batch(batch)

        images = images.to(device)
        labels = labels.to(device)

        # Tokenize text input and move the resulting tensors to the device
        texts = _tokenize_texts(texts)
        texts = {k: v.to(device) for k, v in texts.items()}

        optimizer.zero_grad()

        outputs = model(images=images, text_inputs=texts)
        logits = _extract_logits(outputs)

        loss = criterion(logits, labels)
        loss.backward()
        optimizer.step()

        # Running loss / accuracy statistics
        train_loss += loss.item()
        _, predicted = logits.max(1)
        train_total += labels.size(0)
        train_correct += predicted.eq(labels).sum().item()

        if batch_idx % 10 == 0:
            print(f"  Batch {batch_idx+1}/{len(train_loader)}, Loss: {loss.item():.4f}")

    train_acc = 100. * train_correct / train_total
    train_time = time.time() - start_time

    print(f"\n训练结果:")
    print(f"  Loss: {train_loss/len(train_loader):.4f}")
    print(f"  Acc: {train_acc:.2f}%")
    print(f"  时间: {train_time:.1f}秒")

    # ---- Validation phase (no gradient tracking) ----
    model.eval()
    val_loss = 0.0
    val_correct = 0
    val_total = 0

    with torch.no_grad():
        for batch in val_loader:
            images, texts, labels = _unpack_batch(batch)

            images = images.to(device)
            labels = labels.to(device)

            texts = _tokenize_texts(texts)
            texts = {k: v.to(device) for k, v in texts.items()}

            outputs = model(images=images, text_inputs=texts)
            logits = _extract_logits(outputs)

            loss = criterion(logits, labels)
            val_loss += loss.item()
            _, predicted = logits.max(1)
            val_total += labels.size(0)
            val_correct += predicted.eq(labels).sum().item()

    val_acc = 100. * val_correct / val_total

    print(f"\n验证结果:")
    print(f"  Loss: {val_loss/len(val_loader):.4f}")
    print(f"  Acc: {val_acc:.2f}%")

    # ---- Checkpoint: save model + optimizer state every epoch ----
    save_dir = UC_MODEL_DIR / "outputs" / "simple_training"
    save_dir.mkdir(parents=True, exist_ok=True)

    torch.save({
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
        'optimizer_state_dict': optimizer.state_dict(),
        'train_acc': train_acc,
        'val_acc': val_acc,
    }, save_dir / f"model_epoch_{epoch+1}.pth")

    print(f"  已保存模型到: {save_dir / f'model_epoch_{epoch+1}.pth'}")

print("\n训练完成!")