# -*- coding: UTF-8 -*-
"""
@Date    ：2025/9/16 14:21 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：pre_mlm_trainer.py
@IDE     ：PyCharm 
"""
import time
import torch
import logging
import os
from torch import nn
from torch.utils.data import DataLoader
from src.tools.utils import load_config
from transformers import get_linear_schedule_with_warmup

# Load project configuration at import time.
# NOTE: these module-level values are consumed as default arguments in
# MLMTrainer.__init__, so they are frozen when this module is first imported;
# editing config.yaml afterwards has no effect without a re-import.
yaml_config = load_config("./config.yaml")
train_params = yaml_config["train_config"]  # training hyper-parameters section
device = yaml_config["config"]["device"]  # target device string — presumably "cuda"/"cpu"; set in config.yaml

class MLMTrainer:
    """Trainer for masked-language-model (MLM) pre-training.

    Wraps the full workflow: per-epoch training with optional CUDA AMP,
    gradient clipping and a per-step linear warmup/decay LR schedule;
    validation and testing; best-model checkpointing by validation loss;
    early stopping; and both a text log and a CSV log (for plotting).
    """

    def __init__(self,
                 model: nn.Module,
                 train_loader: DataLoader,
                 valid_loader: DataLoader,
                 test_loader: DataLoader,
                 num_epochs: int = train_params['num_epochs'],
                 lr: float = float(train_params['lr']),
                 weight_decay: float = float(train_params['weight_decay']),
                 scheduler: bool = train_params["scheduler"],
                 use_amp: bool = train_params["use_amp"],
                 grad_clip: float = train_params["grad_clip"],
                 checkpoint_path: str = train_params["checkpoint_path"],
                 logs_file: str = train_params["logs_file"],
                 ):
        """Build optimizer, LR scheduler, AMP scaler, loggers and state.

        Args:
            model: the MLM model; it is moved onto the configured device.
            train_loader: training batches.
            valid_loader: validation batches.
            test_loader: test batches.
            num_epochs: number of training epochs.
            lr: AdamW learning rate.
            weight_decay: AdamW weight decay.
            scheduler: if True, use linear warmup + linear decay per step.
            use_amp: enable CUDA automatic mixed precision.
            grad_clip: max gradient norm for clipping.
            checkpoint_path: directory where the best checkpoint is saved.
            logs_file: directory for the text/CSV logs (despite the name,
                this is treated as a directory, not a file).
        """
        self.model = model.to(device)
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader
        self.num_epochs = num_epochs
        self.lr = lr
        self.grad_clip = grad_clip
        self.checkpoint_path = checkpoint_path
        os.makedirs(checkpoint_path, exist_ok=True)

        # Log directory (text log and CSV live here).
        self.logs_dir = logs_file
        os.makedirs(self.logs_dir, exist_ok=True)

        # Per-instance logger; id(self) in the name keeps instances from
        # sharing (and duplicating) handlers.
        self.logger = logging.getLogger(f"MLMTrainer_{id(self)}")
        if not self.logger.handlers:  # guard against duplicate handlers
            self.logger.setLevel(logging.INFO)

            # File handler
            fh = logging.FileHandler(os.path.join(self.logs_dir, "training.log"))
            fh.setLevel(logging.INFO)
            # Console handler
            ch = logging.StreamHandler()
            ch.setLevel(logging.INFO)

            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            fh.setFormatter(formatter)
            ch.setFormatter(formatter)

            self.logger.addHandler(fh)
            self.logger.addHandler(ch)

        # Optimizer
        self.optimizer = torch.optim.AdamW(model.parameters(), lr=self.lr, weight_decay=weight_decay)

        # LR schedule: linear warmup then linear decay, stepped once per batch.
        total_steps = len(self.train_loader) * self.num_epochs
        warmup_steps = train_params.get("warmup_steps", int(0.1 * total_steps))

        self.scheduler = get_linear_schedule_with_warmup(
            self.optimizer,
            num_warmup_steps=warmup_steps,
            num_training_steps=total_steps
        ) if scheduler else None

        # Mixed precision; GradScaler is a transparent no-op when disabled.
        self.use_amp = use_amp
        self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

        # Training state for checkpointing / early stopping.
        self.best_loss = float("inf")
        self.best_valid_acc = 0.0
        self.epochs_no_improve = 0
        self.early_stop_patience = train_params["early_stop_patience"]
        self.train_history = []

        # Structured CSV log for plotting. The mask_acc column holds the
        # VALIDATION mask accuracy (see train()).
        self.csv_log_file = open(os.path.join(self.logs_dir, 'mlm_training.csv'), "w", encoding="utf-8")
        self.csv_log_file.write("epoch,train_loss,valid_loss,lr,mask_acc\n")

    def _unpack_batch(self, batch):
        """Extract model inputs from a batch dict and move tensors to device.

        NOTE(review): assumes each batch carries the keys below.
        'same_event_interval' is deliberately NOT moved to the device,
        matching prior behavior — confirm the model expects that.
        """
        return {
            "input_ids": batch['event_subtype'].to(device),
            "labels": batch['event_subtype_labels'].to(device),
            "attention_mask": batch['event_subtype_mask'].to(device),
            "age": batch['age'].to(device),
            "interval": batch['interval'].to(device),
            "same_event_interval": batch['same_event_interval'],
        }

    @staticmethod
    def _masked_counts(logits, labels):
        """Return (correct, total) over masked positions (labels != -100)."""
        masked_positions = (labels != -100)
        if not masked_positions.any():
            return 0, 0
        preds = logits.argmax(dim=-1)
        correct = (preds[masked_positions] == labels[masked_positions]).sum().item()
        return correct, masked_positions.sum().item()

    def train_epoch(self, epoch):
        """Run one training epoch.

        Returns:
            (avg_loss, mask_acc, current_lr) — mean batch loss, masked-token
            prediction accuracy, and the LR after this epoch's scheduler steps.
        """
        self.model.train()
        total_loss = 0.0
        correct_masked_preds = 0
        total_masked_tokens = 0
        start_time = time.time()

        for batch_idx, batch in enumerate(self.train_loader):
            inputs = self._unpack_batch(batch)

            with torch.cuda.amp.autocast(enabled=self.use_amp):
                outputs = self.model(**inputs)
                loss = outputs.loss

            self.optimizer.zero_grad()
            self.scaler.scale(loss).backward()

            # Unscale before clipping so the threshold applies to the true
            # gradient norm, not the scaled one.
            self.scaler.unscale_(self.optimizer)
            torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)

            # Canonical AMP order: step, then update the scale factor.
            self.scaler.step(self.optimizer)
            self.scaler.update()

            # Per-step LR schedule (warmup + decay), after the optimizer step.
            if self.scheduler is not None:
                self.scheduler.step()

            total_loss += loss.item()

            # Masked-token prediction accuracy.
            correct, masked = self._masked_counts(outputs.logits, inputs["labels"])
            correct_masked_preds += correct
            total_masked_tokens += masked

            if batch_idx % 50 == 0:
                current_lr = self.optimizer.param_groups[0]['lr']
                self.logger.info(
                    f"Epoch {epoch + 1} | Batch {batch_idx}/{len(self.train_loader)} | "
                    f"Loss: {loss.item():.4f} | LR: {current_lr:.2e}"
                )

        avg_loss = total_loss / len(self.train_loader)
        mask_acc = correct_masked_preds / total_masked_tokens if total_masked_tokens > 0 else 0.0
        epoch_time = time.time() - start_time

        # Real LR after this epoch's warmup/decay steps.
        current_lr = self.optimizer.param_groups[0]["lr"]

        self.logger.info(
            f"Epoch {epoch + 1} | Train Loss: {avg_loss:.4f} | "
            f"Train Mask Accuracy: {mask_acc:.4f} | LR: {current_lr:.6e} | Time: {epoch_time:.2f}s"
        )

        return avg_loss, mask_acc, current_lr

    def _evaluate(self, loader):
        """Shared no-grad pass over `loader`; return (avg_loss, mask_acc)."""
        self.model.eval()
        total_loss = 0.0
        correct_masked_preds = 0
        total_masked_tokens = 0

        with torch.no_grad():
            for batch in loader:
                inputs = self._unpack_batch(batch)
                outputs = self.model(**inputs)
                total_loss += outputs.loss.item()

                correct, masked = self._masked_counts(outputs.logits, inputs["labels"])
                correct_masked_preds += correct
                total_masked_tokens += masked

        avg_loss = total_loss / len(loader)
        mask_acc = correct_masked_preds / total_masked_tokens if total_masked_tokens > 0 else 0.0
        return avg_loss, mask_acc

    def validate(self):
        """Evaluate on the validation set; return (avg_loss, mask_acc)."""
        start_time = time.time()
        avg_loss, mask_acc = self._evaluate(self.valid_loader)
        valid_time = time.time() - start_time

        self.logger.info(
            f"Valid Loss: {avg_loss:.4f} | Valid Mask Accuracy: {mask_acc:.4f} | Time: {valid_time:.2f}s"
        )

        return avg_loss, mask_acc

    def test(self):
        """Evaluate on the test set; return (avg_loss, mask_acc)."""
        avg_loss, mask_acc = self._evaluate(self.test_loader)

        self.logger.info(f"Test Loss: {avg_loss:.4f} | Test Mask Accuracy: {mask_acc:.4f}")
        return avg_loss, mask_acc

    def train(self):
        """Run the full pre-training loop.

        Per epoch: train, validate, append stats to history and CSV, save a
        checkpoint whenever validation loss improves, and stop early after
        `early_stop_patience` epochs without improvement. Re-raises any
        training exception after logging it; always closes the CSV log and
        detaches logger handlers.
        """
        try:
            self.logger.info(f"Starting MLM Pre-training on {device} ...")
            self.logger.info(f"Train samples: {len(self.train_loader.dataset)} | Valid samples: {len(self.valid_loader.dataset)}")
            self.logger.info(f"Total steps: {len(self.train_loader) * self.num_epochs}")

            for epoch in range(self.num_epochs):
                train_loss, train_mask_acc, current_lr = self.train_epoch(epoch)
                valid_loss, valid_mask_acc = self.validate()

                # The scheduler is stepped per batch in train_epoch, so it is
                # intentionally NOT stepped here.

                epoch_stats = {
                    "epoch": epoch + 1,
                    "train_loss": train_loss,
                    "valid_loss": valid_loss,
                    "train_mask_acc": train_mask_acc,
                    "valid_mask_acc": valid_mask_acc,
                    "lr": current_lr,
                }
                self.train_history.append(epoch_stats)

                # Structured CSV row (mask_acc column = validation accuracy).
                self.csv_log_file.write(f"{epoch+1},{train_loss:.6f},{valid_loss:.6f},{current_lr:.6e},{valid_mask_acc:.6f}\n")
                self.csv_log_file.flush()

                # Early-stopping bookkeeping keyed on validation loss.
                if valid_loss < self.best_loss:
                    self.best_valid_acc = valid_mask_acc
                    self.best_loss = valid_loss
                    self.epochs_no_improve = 0

                    checkpoint = {
                        "epoch": epoch + 1,
                        "model_state_dict": self.model.state_dict(),
                        "optimizer_state_dict": self.optimizer.state_dict(),
                        "scaler_state_dict": self.scaler.state_dict(),
                        "valid_acc": valid_mask_acc,
                        "valid_loss": valid_loss,
                    }
                    torch.save(checkpoint, os.path.join(self.checkpoint_path, "best_mlm_model.pth"))
                    self.logger.info(f"Saved best model with valid loss: {valid_loss:.4f} | valid_acc: {valid_mask_acc:.4f}")
                else:
                    self.epochs_no_improve += 1
                    self.logger.info(f"No improvement for {self.epochs_no_improve} epochs. Best valid acc: {self.best_valid_acc:.4f}")

                if self.epochs_no_improve >= self.early_stop_patience:
                    self.logger.info(f"Early stopping triggered at epoch {epoch + 1}")
                    break

        except Exception as e:
            self.logger.error(f"Training interrupted with error: {str(e)}", exc_info=True)
            raise
        finally:
            # FIX: log the final message BEFORE detaching handlers; previously
            # it was emitted after removal, so it never reached the log file
            # or the console.
            self.logger.info("MLM Pre-training finished.")

            # Close the CSV log.
            if hasattr(self, 'csv_log_file') and not self.csv_log_file.closed:
                self.csv_log_file.close()

            # Detach and close logger handlers so file descriptors are freed.
            for handler in self.logger.handlers[:]:
                handler.close()
                self.logger.removeHandler(handler)

    def __del__(self):
        """Best-effort cleanup: close the CSV log if it is still open."""
        if hasattr(self, 'csv_log_file') and not self.csv_log_file.closed:
            self.csv_log_file.close()