"""
UCEIS模型训练脚本 - 修复患者级别数据泄露版本
确保同一患者的图像不会同时出现在训练集和验证集
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from pathlib import Path
import numpy as np
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score
from tqdm import tqdm
import json
import logging
import glob
import random
from collections import defaultdict
from datetime import datetime

# Make the sibling 'utils' directory importable regardless of the CWD
# (the `dataset` module below lives there).
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))

from dataset import UCEISDataset, train_transform, val_transform, custom_collate_fn

# Configure logging: mirror every INFO+ message to both a log file and stdout.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('training_patient_split.log'),
        logging.StreamHandler()
    ]
)
# Module-level logger used throughout this script.
logger = logging.getLogger(__name__)


def extract_patient_id(file_path):
    """Extract the patient identifier from a file path.

    Patient directories are assumed to be named with curly braces
    (e.g. ``{P001}``): the first path component containing both '{' and
    '}' is returned. Handles both '/' and '\\' separators.

    Returns:
        The patient-ID path component, or None if no component matches.
    """
    parts = file_path.replace('\\', '/').split('/')
    for part in parts:
        if '{' in part and '}' in part:
            return part
    return None


def split_by_patients(all_files, train_ratio=0.8, random_seed=42):
    """Split files into train/val sets at the patient level, avoiding leakage.

    All files belonging to one patient go entirely to one side of the
    split, so the same patient never appears in both sets.

    Args:
        all_files: list of image file paths.
        train_ratio: fraction of *patients* assigned to the training set.
        random_seed: seed for the (local) shuffle RNG.

    Returns:
        (train_files, val_files): two disjoint lists of file paths.

    Raises:
        ValueError: if the resulting splits unexpectedly share a file.
    """
    # Group files by patient ID.
    patient_files = defaultdict(list)
    for file_path in all_files:
        patient_id = extract_patient_id(file_path)
        if patient_id:
            patient_files[patient_id].append(file_path)
        else:
            # No patient ID in the path: fall back to grouping by file name.
            filename = os.path.basename(file_path)
            patient_files[filename].append(file_path)

    logger.info(f"总患者数: {len(patient_files)}")

    # FIX: use a dedicated Random instance instead of reseeding the global
    # `random` module, so this helper has no side effect on other callers.
    # Random(seed).shuffle produces the same order as seed()+shuffle did.
    rng = random.Random(random_seed)

    # Shuffle the patients, then cut the list at the train/val boundary.
    patient_ids = list(patient_files.keys())
    rng.shuffle(patient_ids)

    split_idx = int(len(patient_ids) * train_ratio)
    train_patients = patient_ids[:split_idx]
    val_patients = patient_ids[split_idx:]

    # Collect every file of each patient into the corresponding split.
    train_files = []
    val_files = []

    for patient_id in train_patients:
        train_files.extend(patient_files[patient_id])

    for patient_id in val_patients:
        val_files.extend(patient_files[patient_id])

    logger.info(f"训练集: {len(train_files)} 张图像 (来自 {len(train_patients)} 个患者)")
    logger.info(f"验证集: {len(val_files)} 张图像 (来自 {len(val_patients)} 个患者)")

    # FIX: validate with an explicit exception instead of `assert`, which is
    # silently stripped when Python runs with the -O flag.
    overlap = set(train_files) & set(val_files)
    if overlap:
        raise ValueError("数据分割错误：存在重叠文件")

    logger.info("✅ 数据分割验证通过，没有数据泄露")

    return train_files, val_files


class PatientSplitTrainer:
    """Trainer that uses a patient-level train/val split to avoid data leakage.

    Builds the multimodal model, patient-split dataloaders, optimizer and
    LR scheduler from ``config``, then runs the train/eval loop with early
    stopping on validation macro-F1.
    """

    def __init__(self, config):
        self.config = config
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        logger.info(f"使用设备: {self.device}")

        # Seed all RNGs for reproducibility.
        torch.manual_seed(config.seed)
        np.random.seed(config.seed)
        if torch.cuda.is_available():
            torch.cuda.manual_seed(config.seed)

        # Build all training components.
        self.setup_model()
        self.setup_data()
        self.setup_optimizer()
        self.setup_logging()

        # Per-epoch metric history for each phase.
        self.train_history = {
            'loss': [],
            'accuracy': [],
            'balanced_accuracy': [],
            'f1_macro': []
        }
        self.val_history = {
            'loss': [],
            'accuracy': [],
            'balanced_accuracy': [],
            'f1_macro': []
        }

        # Early-stopping state: best validation macro-F1 so far, and the
        # number of epochs since it last improved.
        self.best_val_f1 = 0.0
        self.patience_counter = 0

    def setup_model(self):
        """Build the model, re-init the classifier head, and set the loss."""
        logger.info("初始化模型...")

        # Local import defers the heavy model dependency until training starts.
        from models.fusion_model import EndoMultimodalModel

        self.model = EndoMultimodalModel(
            image_pretrained=self.config.image_pretrained,
            text_pretrained=self.config.text_pretrained,
            feature_dim=self.config.feature_dim,
            num_classes=self.config.num_classes
        )

        self.model = self.model.to(self.device)

        # Re-initialize the classifier head with Xavier-uniform weights.
        if hasattr(self.model, 'classifier'):
            for module in self.model.classifier.modules():
                if isinstance(module, nn.Linear):
                    nn.init.xavier_uniform_(module.weight)
                    # FIX: a Linear created with bias=False has bias None,
                    # which would make zeros_ raise — guard it.
                    if module.bias is not None:
                        nn.init.zeros_(module.bias)

        # Parameter counts (for the log only).
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        logger.info(f"模型参数量: {total_params:,}")
        logger.info(f"可训练参数: {trainable_params:,}")

        # Loss: label smoothing softens targets against overconfidence.
        # (No class weights are applied here, despite what an earlier
        # comment claimed.)
        self.criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    def setup_data(self):
        """Create train/val DataLoaders using the patient-level split."""
        logger.info(f"加载数据集: {self.config.data_dir}")

        # Recursively collect every bitmap image under the data directory.
        all_bmp_files = glob.glob(os.path.join(self.config.data_dir, '**', '*.bmp'), recursive=True)

        if not all_bmp_files:
            raise FileNotFoundError(f"在 {self.config.data_dir} 中未找到任何 .bmp 文件。")

        logger.info(f"总图像数: {len(all_bmp_files)}")

        # Split at the patient level so no patient spans both sets.
        train_files, val_files = split_by_patients(
            all_bmp_files,
            train_ratio=0.8,  # 80% train / 20% val
            random_seed=self.config.seed
        )

        # Warn if the validation set may be too small to give stable metrics.
        if len(val_files) < 500:
            logger.warning(f"验证集较小 ({len(val_files)} 张)，考虑增加验证集比例")

        train_dataset = UCEISDataset(train_files, transform=train_transform)
        val_dataset = UCEISDataset(val_files, transform=val_transform)

        logger.info(f"训练集样本数: {len(train_dataset)}")
        logger.info(f"验证集样本数: {len(val_dataset)}")

        # NOTE(review): lambdas are not picklable, so these collate fns only
        # work with num_workers=0 (the configured Windows-compatible default).
        train_collate_fn = lambda batch: custom_collate_fn(batch, self.device)
        val_collate_fn = lambda batch: custom_collate_fn(batch, self.device)

        self.train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.batch_size,
            shuffle=True,
            num_workers=self.config.train_num_workers,
            pin_memory=self.device.type == 'cuda',
            collate_fn=train_collate_fn,
            drop_last=True  # keep batch statistics stable during training
        )

        self.val_loader = DataLoader(
            val_dataset,
            batch_size=self.config.batch_size,
            shuffle=False,
            num_workers=self.config.val_num_workers,
            pin_memory=self.device.type == 'cuda',
            collate_fn=val_collate_fn,
            drop_last=False  # evaluate on every validation sample
        )

        logger.info("数据加载器创建完成")

    def setup_optimizer(self):
        """Create the AdamW optimizer (layer-wise LRs) and the LR scheduler."""
        # Layer-wise learning rates: pretrained branches get 10x smaller LR
        # than the freshly-initialized fusion and classifier layers.
        base_lr = self.config.lr
        param_groups = [
            {
                'params': self.model.image_branch.parameters(),
                'lr': base_lr * 0.1,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.text_branch.parameters(),
                'lr': base_lr * 0.1,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.fusion_layers.parameters(),
                'lr': base_lr,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.classifier.parameters(),
                'lr': base_lr,
                'weight_decay': self.config.weight_decay
            }
        ]

        # The top-level weight_decay is redundant (every group sets its own)
        # but harmless; kept for clarity of intent.
        self.optimizer = optim.AdamW(param_groups, weight_decay=self.config.weight_decay)

        # Halve all LRs when validation macro-F1 plateaus ('max' mode because
        # the monitored metric should increase).
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            mode='max',
            factor=0.5,
            patience=self.config.lr_patience
        )

        logger.info(f"优化器创建完成，学习率: {base_lr}")

    def setup_logging(self):
        """Create the output directory and persist the run configuration."""
        self.output_dir = Path(self.config.output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Save a JSON snapshot of the configuration for reproducibility.
        config_path = self.output_dir / 'config_patient_split.json'
        with open(config_path, 'w', encoding='utf-8') as f:
            config_dict = {
                'data_dir': self.config.data_dir,
                'batch_size': self.config.batch_size,
                'epochs': self.config.epochs,
                'lr': self.config.lr,
                'num_classes': self.config.num_classes,
                'feature_dim': self.config.feature_dim,
                'seed': self.config.seed,
                'split_type': 'patient_level'
            }
            json.dump(config_dict, f, indent=2, ensure_ascii=False)

        logger.info(f"输出目录: {self.output_dir}")

    def train_one_epoch(self, epoch):
        """Run one training epoch.

        Returns:
            (avg_loss, accuracy, balanced_accuracy, f1_macro) for the epoch.
        """
        self.model.train()
        epoch_loss = 0.0
        all_predictions = []
        all_labels = []

        progress_bar = tqdm(self.train_loader, desc=f'Epoch {epoch+1}/{self.config.epochs} [Train]')

        for batch_idx, batch in enumerate(progress_bar):
            # Move the batch to the training device.
            images = batch['images'].to(self.device)
            text_inputs = {k: v.to(self.device) for k, v in batch['text_inputs'].items()}
            labels = batch['labels'].to(self.device)

            self.optimizer.zero_grad()

            outputs = self.model(images, text_inputs)
            loss = self.criterion(outputs, labels)

            loss.backward()
            # Clip gradients to stabilize training.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

            self.optimizer.step()

            epoch_loss += loss.item()
            predictions = torch.argmax(outputs, dim=1)
            all_predictions.extend(predictions.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            # Progress bar shows the current batch's loss/accuracy and LR
            # (the trailing slice of the accumulators is exactly this batch).
            current_lr = self.optimizer.param_groups[0]['lr']
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'acc': f'{accuracy_score(all_labels[-len(labels):], all_predictions[-len(labels):]):.3f}',
                'lr': f'{current_lr:.6f}'
            })

        # Aggregate epoch-level metrics.
        avg_loss = epoch_loss / len(self.train_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        balanced_acc = balanced_accuracy_score(all_labels, all_predictions)
        f1_macro = f1_score(all_labels, all_predictions, average='macro')

        self.train_history['loss'].append(avg_loss)
        self.train_history['accuracy'].append(accuracy)
        self.train_history['balanced_accuracy'].append(balanced_acc)
        self.train_history['f1_macro'].append(f1_macro)

        logger.info(f'Train - Loss: {avg_loss:.4f}, Acc: {accuracy:.4f}, Balanced Acc: {balanced_acc:.4f}, F1-macro: {f1_macro:.4f}')

        return avg_loss, accuracy, balanced_acc, f1_macro

    def evaluate(self, epoch):
        """Evaluate on the validation set and step the LR scheduler.

        Returns:
            (avg_loss, accuracy, balanced_accuracy, f1_macro) for the epoch.
        """
        self.model.eval()
        epoch_loss = 0.0
        all_predictions = []
        all_labels = []

        with torch.no_grad():
            progress_bar = tqdm(self.val_loader, desc=f'Epoch {epoch+1}/{self.config.epochs} [Val]')

            for batch in progress_bar:
                # Move the batch to the evaluation device.
                images = batch['images'].to(self.device)
                text_inputs = {k: v.to(self.device) for k, v in batch['text_inputs'].items()}
                labels = batch['labels'].to(self.device)

                outputs = self.model(images, text_inputs)
                loss = self.criterion(outputs, labels)

                epoch_loss += loss.item()
                predictions = torch.argmax(outputs, dim=1)
                all_predictions.extend(predictions.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                progress_bar.set_postfix({'loss': f'{loss.item():.4f}'})

        # Aggregate epoch-level metrics.
        avg_loss = epoch_loss / len(self.val_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        balanced_acc = balanced_accuracy_score(all_labels, all_predictions)
        f1_macro = f1_score(all_labels, all_predictions, average='macro')

        self.val_history['loss'].append(avg_loss)
        self.val_history['accuracy'].append(accuracy)
        self.val_history['balanced_accuracy'].append(balanced_acc)
        self.val_history['f1_macro'].append(f1_macro)

        logger.info(f'Val   - Loss: {avg_loss:.4f}, Acc: {accuracy:.4f}, Balanced Acc: {balanced_acc:.4f}, F1-macro: {f1_macro:.4f}')

        # Step the plateau scheduler on the monitored metric.
        self.scheduler.step(f1_macro)

        return avg_loss, accuracy, balanced_acc, f1_macro

    def train(self):
        """Main training loop with checkpointing and early stopping.

        Returns:
            The best validation macro-F1 achieved.
        """
        logger.info("开始训练...")
        logger.info(f"总轮数: {self.config.epochs}")
        logger.info(f"批次大小: {self.config.batch_size}")
        logger.info("⚠️ 使用患者级别数据分割，避免数据泄露")

        for epoch in range(self.config.epochs):
            logger.info(f"\n{'='*60}")
            logger.info(f'Epoch {epoch+1}/{self.config.epochs}')
            # FIX: the original f'{'='*60}' nests single quotes inside a
            # single-quoted f-string — a SyntaxError on Python < 3.12.
            logger.info('=' * 60)

            # Train, then validate.
            train_metrics = self.train_one_epoch(epoch)
            val_metrics = self.evaluate(epoch)

            # Checkpoint when validation macro-F1 (index 3) improves.
            if val_metrics[3] > self.best_val_f1:
                self.best_val_f1 = val_metrics[3]
                self.patience_counter = 0

                checkpoint = {
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler_state_dict': self.scheduler.state_dict(),
                    'train_history': self.train_history,
                    'val_history': self.val_history,
                    'best_val_f1': self.best_val_f1
                }

                torch.save(checkpoint, self.output_dir / 'best_model_patient_split.pth')
                logger.info(f"✅ 最佳模型已保存 (F1-macro: {self.best_val_f1:.4f})")
            else:
                self.patience_counter += 1
                logger.info(f"Patience: {self.patience_counter}/{self.config.patience}")

            # Early stopping after `patience` epochs without improvement.
            if self.patience_counter >= self.config.patience:
                logger.info(f"\n早停触发，在第 {epoch+1} 轮停止训练")
                break

        logger.info("\n训练完成！")
        logger.info(f"最佳验证F1-macro: {self.best_val_f1:.4f}")

        return self.best_val_f1

class Config:
    """训练配置"""
    def __init__(self, test_mode=False):
        # 基本配置
        self.test_mode = test_mode
        self.seed = 42
        self.data_dir = r"D:\肠内镜数据库\UCEIS1-8"

        # 输出目录
        if test_mode:
            self.output_dir = "outputs/test_patient_split"
            self.epochs = 5
            self.batch_size = 8
        else:
            self.output_dir = "outputs/patient_split_training"
            self.epochs = 50
            self.batch_size = 16

        # 训练参数
        self.lr = 5e-5  # 较小的学习率
        self.weight_decay = 1e-4
        self.lr_patience = 5
        self.patience = 15
        self.train_num_workers = 0  # Windows兼容
        self.val_num_workers = 0

        # 模型参数
        self.image_pretrained = True
        self.text_pretrained = 'bert-base-uncased'
        self.feature_dim = 256
        self.num_classes = 8

        logger.info(f"配置初始化完成 (测试模式: {test_mode})")


def main():
    """Entry point: read CLI flags, build the trainer, and run training."""
    # A bare '--test' flag anywhere on the command line selects the
    # short smoke-test configuration.
    test_mode = '--test' in sys.argv

    config = Config(test_mode=test_mode)
    trainer = PatientSplitTrainer(config)
    best_f1 = trainer.train()

    logger.info(f"训练完成，最佳F1-macro: {best_f1:.4f}")


if __name__ == "__main__":
    main()