#!/usr/bin/env python3
"""
Training script with real-time progress monitoring for the UCEIS model.
"""
import os
# NOTE(review): presumably set to silence/disable TensorFlow oneDNN optimizations
# in TF-backed dependencies; must be set before any such import — confirm it is
# actually needed here, since this script itself only uses torch.
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
import numpy as np
from pathlib import Path
import time
import random
from collections import defaultdict, Counter
from datetime import datetime
import logging

# Logging setup: mirror all messages to training.log (UTF-8) and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('training.log', encoding='utf-8'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)

# Make this file's directory (uc_model) importable so the local
# `utils` and `models` packages resolve regardless of the CWD.
UC_MODEL_DIR = Path(__file__).parent
sys.path.insert(0, str(UC_MODEL_DIR))

from utils.dataset import UCEISDataset, train_transform, val_transform
from models.fusion_model import EndoMultimodalModel

# Training configuration (shared by main() and the checkpoint payloads).
config = {
    'data_dir': r"D:\肠内镜数据库\UCEIS1-8",  # root folder scanned recursively for *.bmp
    'batch_size': 16,
    'epochs': 10,
    'lr': 2e-4,
    'device': 'cuda' if torch.cuda.is_available() else 'cpu',
    'seed': 42,
    'save_interval': 1,  # checkpoint every epoch
    'print_interval': 50,  # log progress every 50 batches
}

# Seed all RNGs up front for reproducibility.
torch.manual_seed(config['seed'])
random.seed(config['seed'])
np.random.seed(config['seed'])

def log_info(msg):
    """Echo *msg* to stdout, then record it through the module logger.

    Order matters: stdout first, then the logger (whose handlers also
    write to training.log and the console stream).
    """
    for sink in (print, logger.info):
        sink(msg)

def main():
    """Train and validate the UCEIS multimodal model with live progress logging.

    Pipeline: discover ``*.bmp`` files under ``config['data_dir']``, derive a
    0-based label from the "N分" score embedded in each file path, perform a
    per-label (stratified) 80/20 train/val split, then train for
    ``config['epochs']`` epochs. A checkpoint is written every
    ``config['save_interval']`` epochs and the best validation accuracy model
    is kept separately.

    Fix vs. original: the BERT tokenizer was re-created with
    ``AutoTokenizer.from_pretrained(...)`` inside *every* batch of both the
    training and validation loops — it is now loaded once before the epoch
    loop, and the duplicated tokenize-and-move-to-device logic is shared via
    a single helper.
    """
    log_info("="*60)
    log_info("UCEIS模型训练 - 带实时监控")
    log_info("="*60)
    log_info(f"开始时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    log_info(f"设备: {config['device']}")
    log_info(f"批次大小: {config['batch_size']}")
    log_info(f"学习率: {config['lr']}")

    # 1. Load data
    log_info("\n[步骤1] 加载数据...")
    data_path = Path(config['data_dir'])
    all_files = list(data_path.rglob("*.bmp"))
    log_info(f"总图像数: {len(all_files)}")

    def extract_label(file_path):
        """Map a path containing "1分".."8分" to a 0-based class index (default 0)."""
        path_str = str(file_path)
        for score in range(1, 9):
            if f"{score}分" in path_str:
                return score - 1
        return 0

    # Pair every file with its label, then group by label for a stratified split.
    file_labels = [(f, extract_label(f)) for f in all_files]

    label_groups = defaultdict(list)
    for f, label in file_labels:
        label_groups[label].append(f)

    log_info("\n标签分布:")
    for label in sorted(label_groups.keys()):
        log_info(f"  {label+1}分: {len(label_groups[label])} 张")

    # Stratified 80/20 split: shuffle within each label so class balance is kept.
    train_files = []
    val_files = []

    random.seed(config['seed'])  # re-seed so the split itself is reproducible
    for label, files in label_groups.items():
        random.shuffle(files)
        split_idx = int(len(files) * 0.8)
        train_files.extend(files[:split_idx])
        val_files.extend(files[split_idx:])

    log_info(f"\n数据分割:")
    log_info(f"训练文件: {len(train_files)}")
    log_info(f"验证文件: {len(val_files)}")

    # 2. Build datasets
    log_info("\n[步骤2] 创建数据集...")
    train_dataset = UCEISDataset(
        file_list=train_files,
        transform=train_transform,
        mode='train'
    )

    val_dataset = UCEISDataset(
        file_list=val_files,
        transform=val_transform,
        mode='val'
    )

    log_info(f"训练集样本数: {len(train_dataset)}")
    log_info(f"验证集样本数: {len(val_dataset)}")

    # 3. Build data loaders
    log_info("\n[步骤3] 创建数据加载器...")
    train_loader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        shuffle=True,
        num_workers=2,
        pin_memory=True
    )

    val_loader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        shuffle=False,
        num_workers=2,
        pin_memory=True
    )

    log_info(f"训练批次数: {len(train_loader)}")
    log_info(f"验证批次数: {len(val_loader)}")

    # 4. Build model
    log_info("\n[步骤4] 创建模型...")
    model = EndoMultimodalModel(
        image_pretrained=False,
        text_pretrained='bert-base-uncased',
        feature_dim=256,
        num_classes=8
    ).to(config['device'])

    param_count = sum(p.numel() for p in model.parameters())
    trainable_param_count = sum(p.numel() for p in model.parameters() if p.requires_grad)

    log_info(f"模型参数量: {param_count:,}")
    log_info(f"可训练参数: {trainable_param_count:,}")

    # 5. Optimizer and loss function
    log_info("\n[步骤5] 设置优化器和损失函数...")
    optimizer = optim.AdamW(model.parameters(), lr=config['lr'], weight_decay=1e-5)
    criterion = nn.CrossEntropyLoss()

    # Load the tokenizer ONCE (the original re-loaded it from disk/cache on
    # every single batch of both loops, dominating iteration time).
    from transformers import AutoTokenizer
    tokenizer = AutoTokenizer.from_pretrained('bert-base-uncased')

    def prepare_text_inputs(texts):
        """Tokenize a raw text batch (list/tuple or single str) and move every
        resulting tensor to the configured device.

        Inputs that are already tokenized (e.g. a mapping of tensors) pass
        through unchanged except for the device move — same behavior as the
        original inline code.
        """
        if isinstance(texts, (tuple, list)):
            texts = tokenizer(
                list(texts),
                padding=True,
                truncation=True,
                max_length=128,
                return_tensors='pt'
            )
        elif isinstance(texts, str):
            texts = tokenizer(
                texts,
                padding=True,
                truncation=True,
                max_length=128,
                return_tensors='pt'
            )
        return {k: v.to(config['device']) for k, v in texts.items()}

    # 6. Training loop
    log_info("\n[步骤6] 开始训练...")
    best_val_acc = 0.0
    train_start_time = time.time()

    for epoch in range(config['epochs']):
        epoch_start_time = time.time()
        log_info(f"\n{'='*50}")
        log_info(f"Epoch {epoch+1}/{config['epochs']}")
        log_info(f"{'='*50}")

        # --- training phase ---
        model.train()
        train_loss = 0.0
        train_correct = 0
        train_total = 0
        batch_times = []

        for batch_idx, batch in enumerate(train_loader):
            batch_start_time = time.time()

            images, texts, labels = batch
            images = images.to(config['device'])
            labels = labels.to(config['device'])
            texts = prepare_text_inputs(texts)

            # forward / backward
            optimizer.zero_grad()
            outputs = model(images=images, text_inputs=texts)

            # the model may return a dict; prefer 'logits', else the first value
            if isinstance(outputs, dict):
                logits = outputs.get('logits', list(outputs.values())[0])
            else:
                logits = outputs

            loss = criterion(logits, labels)
            loss.backward()
            optimizer.step()

            # running statistics
            train_loss += loss.item()
            _, predicted = logits.max(1)
            train_total += labels.size(0)
            train_correct += predicted.eq(labels).sum().item()

            batch_time = time.time() - batch_start_time
            batch_times.append(batch_time)

            # periodic progress report with an ETA for the current epoch
            if batch_idx % config['print_interval'] == 0:
                current_acc = 100. * train_correct / train_total
                avg_batch_time = np.mean(batch_times[-config['print_interval']:])
                eta = avg_batch_time * (len(train_loader) - batch_idx - 1)

                log_info(f"  Batch {batch_idx+1}/{len(train_loader)} | "
                        f"Loss: {loss.item():.4f} | "
                        f"Acc: {current_acc:.2f}% | "
                        f"Time: {batch_time:.2f}s | "
                        f"ETA: {eta/60:.1f}min")

        # training-phase summary
        train_acc = 100. * train_correct / train_total
        train_time = time.time() - epoch_start_time

        log_info(f"\n训练完成:")
        log_info(f"  平均Loss: {train_loss/len(train_loader):.4f}")
        log_info(f"  准确率: {train_acc:.2f}%")
        log_info(f"  用时: {train_time/60:.2f}分钟")

        # --- validation phase ---
        log_info("\n[验证阶段]")
        model.eval()
        val_loss = 0.0
        val_correct = 0
        val_total = 0

        with torch.no_grad():
            for batch_idx, batch in enumerate(val_loader):
                images, texts, labels = batch
                images = images.to(config['device'])
                labels = labels.to(config['device'])
                texts = prepare_text_inputs(texts)

                outputs = model(images=images, text_inputs=texts)

                if isinstance(outputs, dict):
                    logits = outputs.get('logits', list(outputs.values())[0])
                else:
                    logits = outputs

                loss = criterion(logits, labels)
                val_loss += loss.item()
                _, predicted = logits.max(1)
                val_total += labels.size(0)
                val_correct += predicted.eq(labels).sum().item()

        val_acc = 100. * val_correct / val_total

        log_info(f"\n验证结果:")
        log_info(f"  平均Loss: {val_loss/len(val_loader):.4f}")
        log_info(f"  准确率: {val_acc:.2f}%")

        # --- checkpointing ---
        if (epoch + 1) % config['save_interval'] == 0:
            save_dir = UC_MODEL_DIR / "outputs" / "monitored_training"
            save_dir.mkdir(parents=True, exist_ok=True)

            save_path = save_dir / f"model_epoch_{epoch+1}.pth"
            torch.save({
                'epoch': epoch,
                'model_state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
                'train_acc': train_acc,
                'val_acc': val_acc,
                'config': config
            }, save_path)

            log_info(f"\n模型已保存: {save_path}")

            # Track the best model by validation accuracy.
            # NOTE(review): this check only runs on save epochs; with
            # save_interval=1 every epoch is checked, but a larger interval
            # would skip epochs — confirm before changing save_interval.
            if val_acc > best_val_acc:
                best_val_acc = val_acc
                best_path = save_dir / "best_model.pth"
                torch.save({
                    'epoch': epoch,
                    'model_state_dict': model.state_dict(),
                    'optimizer_state_dict': optimizer.state_dict(),
                    'train_acc': train_acc,
                    'val_acc': val_acc,
                    'best_val_acc': best_val_acc,
                    'config': config
                }, best_path)
                log_info(f"✅ 最佳模型已更新! (准确率: {best_val_acc:.2f}%)")

        # --- epoch summary ---
        epoch_time = time.time() - epoch_start_time
        total_time = time.time() - train_start_time
        log_info(f"\nEpoch {epoch+1} 总结:")
        log_info(f"  训练准确率: {train_acc:.2f}%")
        log_info(f"  验证准确率: {val_acc:.2f}%")
        log_info(f"  本轮用时: {epoch_time/60:.2f}分钟")
        log_info(f"  累计用时: {total_time/60:.2f}分钟")

        # Heuristic health checks (log-only; training never stops here).
        if train_acc > 90 and val_acc < 50:
            log_info("\n⚠️ 警告: 可能出现过拟合!")
        elif val_acc < 30:
            log_info("\n⚠️ 警告: 验证准确率过低!")

        # Early-stopping *hint* (log-only; the loop always continues).
        if epoch >= 2:
            if val_acc < best_val_acc - 5:
                log_info("\n⚠️ 验证准确率开始下降，考虑提前停止训练")

    # Final summary
    total_training_time = time.time() - train_start_time
    log_info("\n" + "="*60)
    log_info("训练完成!")
    log_info(f"总用时: {total_training_time/60:.2f}分钟")
    log_info(f"最佳验证准确率: {best_val_acc:.2f}%")
    log_info(f"结束时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}")
    log_info("="*60)

# Entry point: run training, handling Ctrl-C gracefully and logging any
# other failure with a traceback before re-raising.
if __name__ == "__main__":
    try:
        main()
    except KeyboardInterrupt:
        log_info("\n\n⚠️ 训练被用户中断!")
        # Fix: the original called `log.info(...)` here, but no name `log`
        # exists — that raised NameError while handling the interrupt.
        logger.info("训练被用户中断")
    except Exception as e:
        log_info(f"\n❌ 训练出错: {str(e)}")
        logger.error(f"训练出错: {str(e)}", exc_info=True)
        raise