"""
UCEIS模型训练脚本 - 修复版
修复了数据加载和模型初始化问题
"""

import os
import sys
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from pathlib import Path
import numpy as np
from sklearn.metrics import accuracy_score, balanced_accuracy_score, f1_score
from tqdm import tqdm
import json
import logging
import glob
import random
from datetime import datetime

# Make the sibling utils/ directory importable (the dataset module lives there).
sys.path.append(os.path.join(os.path.dirname(__file__), 'utils'))

from dataset import UCEISDataset, train_transform, val_transform, custom_collate_fn

# Configure logging: mirror every message to a log file and to the console.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s - %(levelname)s - %(message)s',
    handlers=[
        logging.FileHandler('training_fixed.log'),
        logging.StreamHandler()
    ]
)
logger = logging.getLogger(__name__)


class FixedUCEISTrainer:
    """Fixed trainer for the UCEIS multimodal (image + text) model.

    Handles model construction, file-list based data loading with a 90/10
    split, layer-wise learning rates, and a train/validate loop that
    checkpoints on — and early-stops against — the validation macro-F1.
    """

    def __init__(self, config):
        """Build every training component from *config* (see ``Config``).

        Args:
            config: namespace-like object providing seed, paths, model and
                optimizer hyperparameters.
        """
        self.config = config
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

        logger.info(f"使用设备: {self.device}")

        # Seed every RNG we rely on so runs are reproducible.
        torch.manual_seed(config.seed)
        np.random.seed(config.seed)
        if torch.cuda.is_available():
            # FIX: seed all visible GPUs, not only the current device.
            torch.cuda.manual_seed_all(config.seed)

        # Order matters: the optimizer needs the model to exist first.
        self.setup_model()
        self.setup_data()
        self.setup_optimizer()
        self.setup_logging()

        # Per-epoch metric history for both splits.
        self.train_history = {
            'loss': [],
            'accuracy': [],
            'balanced_accuracy': [],
            'f1_macro': []
        }
        self.val_history = {
            'loss': [],
            'accuracy': [],
            'balanced_accuracy': [],
            'f1_macro': []
        }

        # Early-stopping state: best validation macro-F1 seen so far.
        self.best_val_f1 = 0.0
        self.patience_counter = 0

    def setup_model(self):
        """Create the model, re-initialize the classifier head, define the loss."""
        logger.info("初始化模型...")

        # Imported lazily so the heavy model stack only loads when training.
        from models.fusion_model import EndoMultimodalModel

        self.model = EndoMultimodalModel(
            image_pretrained=self.config.image_pretrained,
            text_pretrained=self.config.text_pretrained,
            feature_dim=self.config.feature_dim,
            num_classes=self.config.num_classes
        )

        self.model = self.model.to(self.device)

        # Better initialization: Xavier weights on the classifier head.
        if hasattr(self.model, 'classifier'):
            for module in self.model.classifier.modules():
                if isinstance(module, nn.Linear):
                    nn.init.xavier_uniform_(module.weight)
                    # FIX: nn.Linear(..., bias=False) leaves module.bias as
                    # None; guard before zeroing to avoid a TypeError.
                    if module.bias is not None:
                        nn.init.zeros_(module.bias)

        # Parameter statistics.
        total_params = sum(p.numel() for p in self.model.parameters())
        trainable_params = sum(p.numel() for p in self.model.parameters() if p.requires_grad)
        logger.info(f"模型参数量: {total_params:,}")
        logger.info(f"可训练参数: {trainable_params:,}")

        # Cross-entropy with label smoothing to temper over-confident logits.
        self.criterion = nn.CrossEntropyLoss(label_smoothing=0.1)

    def setup_data(self):
        """Discover .bmp files, split 90/10 into train/val, build the loaders.

        Raises:
            FileNotFoundError: if no .bmp file exists under ``config.data_dir``.
        """
        logger.info(f"加载数据集: {self.config.data_dir}")

        # Recursively collect every image under the data directory.
        all_bmp_files = glob.glob(os.path.join(self.config.data_dir, '**', '*.bmp'), recursive=True)

        if not all_bmp_files:
            raise FileNotFoundError(f"在 {self.config.data_dir} 中未找到任何 .bmp 文件。")

        logger.info(f"总图像数: {len(all_bmp_files)}")

        # Deterministic shuffle before splitting.
        random.seed(self.config.seed)
        random.shuffle(all_bmp_files)

        # 90% train / 10% validation split on file level.
        split_idx = int(len(all_bmp_files) * 0.9)
        train_files = all_bmp_files[:split_idx]
        val_files = all_bmp_files[split_idx:]

        logger.info(f"数据分割:")
        logger.info(f"  训练文件: {len(train_files)}")
        logger.info(f"  验证文件: {len(val_files)}")

        # Datasets built directly from file lists.
        train_dataset = UCEISDataset(train_files, transform=train_transform)
        val_dataset = UCEISDataset(val_files, transform=val_transform)

        logger.info(f"训练集样本数: {len(train_dataset)}")
        logger.info(f"验证集样本数: {len(val_dataset)}")

        # FIX: one shared collate closure instead of two identical lambdas.
        # NOTE(review): a lambda cannot be pickled, so this requires
        # num_workers == 0 (as Config sets for Windows) — confirm before
        # raising the worker counts.
        collate_fn = lambda batch: custom_collate_fn(batch, self.device)

        self.train_loader = DataLoader(
            train_dataset,
            batch_size=self.config.batch_size,
            shuffle=True,
            num_workers=self.config.train_num_workers,
            pin_memory=self.device.type == 'cuda',
            collate_fn=collate_fn,
            drop_last=True  # keep batch statistics stable during training
        )

        self.val_loader = DataLoader(
            val_dataset,
            batch_size=self.config.batch_size,
            shuffle=False,
            num_workers=self.config.val_num_workers,
            pin_memory=self.device.type == 'cuda',
            collate_fn=collate_fn,
            drop_last=False  # evaluate every validation sample
        )

        logger.info("数据加载器创建完成")

    def setup_optimizer(self):
        """AdamW with layer-wise learning rates plus a plateau LR scheduler."""
        # Pretrained branches get a 10x smaller LR than the freshly
        # initialized fusion/classifier heads.
        param_groups = [
            {
                'params': self.model.image_branch.parameters(),
                'lr': self.config.lr * 0.1,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.text_branch.parameters(),
                'lr': self.config.lr * 0.1,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.fusion_layers.parameters(),
                'lr': self.config.lr,
                'weight_decay': self.config.weight_decay
            },
            {
                'params': self.model.classifier.parameters(),
                'lr': self.config.lr,
                'weight_decay': self.config.weight_decay
            }
        ]

        # The per-group weight_decay above overrides this optimizer-level
        # default; it is passed again only to keep the default explicit.
        self.optimizer = optim.AdamW(param_groups, weight_decay=self.config.weight_decay)

        # Halve every group's LR when the monitored metric (validation
        # macro-F1, see evaluate()) stops improving.
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer,
            mode='max',
            factor=0.5,
            patience=self.config.lr_patience
        )

        logger.info(f"优化器创建完成，学习率: {self.config.lr}")

    def setup_logging(self):
        """Create the output directory and persist the run configuration."""
        self.output_dir = Path(self.config.output_dir)
        self.output_dir.mkdir(parents=True, exist_ok=True)

        # Snapshot the config so the run is reproducible from disk.
        config_path = self.output_dir / 'config_fixed.json'
        with open(config_path, 'w', encoding='utf-8') as f:
            json.dump(vars(self.config), f, indent=2, ensure_ascii=False)

        logger.info(f"输出目录: {self.output_dir}")

    def train_one_epoch(self, epoch):
        """Run one training epoch.

        Args:
            epoch: zero-based epoch index (used for display only).

        Returns:
            tuple: (avg_loss, accuracy, balanced_accuracy, f1_macro).
        """
        self.model.train()
        epoch_loss = 0.0
        all_predictions = []
        all_labels = []

        progress_bar = tqdm(self.train_loader, desc=f'Epoch {epoch+1}/{self.config.epochs} [Train]')

        for batch_idx, batch in enumerate(progress_bar):
            # Move the batch to the training device.
            images = batch['images'].to(self.device)
            text_inputs = {k: v.to(self.device) for k, v in batch['text_inputs'].items()}
            labels = batch['labels'].to(self.device)

            self.optimizer.zero_grad()

            outputs = self.model(images, text_inputs)
            loss = self.criterion(outputs, labels)

            loss.backward()
            # Clip gradients to stabilize early training.
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), max_norm=1.0)

            self.optimizer.step()

            epoch_loss += loss.item()
            predictions = torch.argmax(outputs, dim=1)
            all_predictions.extend(predictions.cpu().numpy())
            all_labels.extend(labels.cpu().numpy())

            # Progress bar shows the accuracy of the current batch only
            # (the last len(labels) entries of the running lists).
            current_lr = self.optimizer.param_groups[0]['lr']
            progress_bar.set_postfix({
                'loss': f'{loss.item():.4f}',
                'acc': f'{accuracy_score(all_labels[-len(labels):], all_predictions[-len(labels):]):.3f}',
                'lr': f'{current_lr:.6f}'
            })

        avg_loss = epoch_loss / len(self.train_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        balanced_acc = balanced_accuracy_score(all_labels, all_predictions)
        f1_macro = f1_score(all_labels, all_predictions, average='macro')

        self.train_history['loss'].append(avg_loss)
        self.train_history['accuracy'].append(accuracy)
        self.train_history['balanced_accuracy'].append(balanced_acc)
        self.train_history['f1_macro'].append(f1_macro)

        logger.info(f'Train - Loss: {avg_loss:.4f}, Acc: {accuracy:.4f}, Balanced Acc: {balanced_acc:.4f}, F1-macro: {f1_macro:.4f}')

        return avg_loss, accuracy, balanced_acc, f1_macro

    def evaluate(self, epoch):
        """Evaluate on the validation split and step the LR scheduler.

        Args:
            epoch: zero-based epoch index (used for display only).

        Returns:
            tuple: (avg_loss, accuracy, balanced_accuracy, f1_macro).
        """
        self.model.eval()
        epoch_loss = 0.0
        all_predictions = []
        all_labels = []

        with torch.no_grad():
            progress_bar = tqdm(self.val_loader, desc=f'Epoch {epoch+1}/{self.config.epochs} [Val]')

            for batch in progress_bar:
                # Move the batch to the evaluation device.
                images = batch['images'].to(self.device)
                text_inputs = {k: v.to(self.device) for k, v in batch['text_inputs'].items()}
                labels = batch['labels'].to(self.device)

                outputs = self.model(images, text_inputs)
                loss = self.criterion(outputs, labels)

                epoch_loss += loss.item()
                predictions = torch.argmax(outputs, dim=1)
                all_predictions.extend(predictions.cpu().numpy())
                all_labels.extend(labels.cpu().numpy())

                progress_bar.set_postfix({'loss': f'{loss.item():.4f}'})

        avg_loss = epoch_loss / len(self.val_loader)
        accuracy = accuracy_score(all_labels, all_predictions)
        balanced_acc = balanced_accuracy_score(all_labels, all_predictions)
        f1_macro = f1_score(all_labels, all_predictions, average='macro')

        self.val_history['loss'].append(avg_loss)
        self.val_history['accuracy'].append(accuracy)
        self.val_history['balanced_accuracy'].append(balanced_acc)
        self.val_history['f1_macro'].append(f1_macro)

        logger.info(f'Val   - Loss: {avg_loss:.4f}, Acc: {accuracy:.4f}, Balanced Acc: {balanced_acc:.4f}, F1-macro: {f1_macro:.4f}')

        # ReduceLROnPlateau in 'max' mode watches this metric.
        self.scheduler.step(f1_macro)

        return avg_loss, accuracy, balanced_acc, f1_macro

    def train(self):
        """Main training loop with best-model checkpointing and early stopping.

        Returns:
            float: the best validation macro-F1 achieved.
        """
        logger.info("开始训练...")
        logger.info(f"总轮数: {self.config.epochs}")
        logger.info(f"批次大小: {self.config.batch_size}")

        for epoch in range(self.config.epochs):
            logger.info(f"\n{'='*60}")
            logger.info(f'Epoch {epoch+1}/{self.config.epochs}')
            # FIX: the original f'{'='*60}' reuses the outer quote character
            # inside the f-string — a SyntaxError before Python 3.12
            # (PEP 701). Plain string multiplication logs the same line.
            logger.info('=' * 60)

            # Train, then validate.
            train_metrics = self.train_one_epoch(epoch)
            val_metrics = self.evaluate(epoch)

            # val_metrics[3] is the macro-F1; checkpoint on improvement.
            if val_metrics[3] > self.best_val_f1:
                self.best_val_f1 = val_metrics[3]
                self.patience_counter = 0

                # Save everything needed to resume or analyze the run.
                checkpoint = {
                    'epoch': epoch,
                    'model_state_dict': self.model.state_dict(),
                    'optimizer_state_dict': self.optimizer.state_dict(),
                    'scheduler_state_dict': self.scheduler.state_dict(),
                    'train_history': self.train_history,
                    'val_history': self.val_history,
                    'best_val_f1': self.best_val_f1
                }

                torch.save(checkpoint, self.output_dir / 'best_model_fixed.pth')
                logger.info(f"✅ 最佳模型已保存 (F1-macro: {self.best_val_f1:.4f})")
            else:
                self.patience_counter += 1
                logger.info(f"Patience: {self.patience_counter}/{self.config.patience}")

            # Early stopping after `patience` epochs without improvement.
            if self.patience_counter >= self.config.patience:
                logger.info(f"\n早停触发，在第 {epoch+1} 轮停止训练")
                break

        logger.info("\n训练完成！")
        logger.info(f"最佳验证F1-macro: {self.best_val_f1:.4f}")

        return self.best_val_f1


class Config:
    """Run configuration: data location, schedule, and hyperparameters.

    ``test_mode=True`` selects a short smoke-test schedule; otherwise the
    full training schedule is used.
    """

    def __init__(self, test_mode=False):
        # Core settings
        self.test_mode = test_mode
        self.seed = 42
        self.data_dir = r"D:\肠内镜数据库\UCEIS1-8"

        # Schedule and output location, switched by mode.
        self.output_dir = "outputs/test_fixed" if test_mode else "outputs/standard_training_fixed"
        self.epochs = 5 if test_mode else 50
        self.batch_size = 8 if test_mode else 16

        # Optimization hyperparameters
        self.lr = 1e-4  # deliberately small learning rate
        self.weight_decay = 1e-4
        self.lr_patience = 5
        self.patience = 15
        self.train_num_workers = 0  # Windows compatibility
        self.val_num_workers = 0

        # Model hyperparameters
        self.image_pretrained = True
        self.text_pretrained = 'bert-base-uncased'
        self.feature_dim = 256
        self.num_classes = 8

        logger.info(f"配置初始化完成 (测试模式: {test_mode})")


def main():
    """Script entry point: read the ``--test`` flag, then run training."""
    config = Config(test_mode='--test' in sys.argv)
    trainer = FixedUCEISTrainer(config)
    best_f1 = trainer.train()
    logger.info(f"训练完成，最佳F1-macro: {best_f1:.4f}")


# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()