# -*- coding: UTF-8 -*-
"""
@Date    ：2025/9/29 10:17 
@Author  ：Liu Yuezhao
@Project ：bert 
@File    ：fine_tune_trainer.py
@IDE     ：PyCharm 
"""
import time
import torch
import logging
import os
import numpy as np

from torch import nn
from torch.utils.data import DataLoader
from sklearn.metrics import accuracy_score, roc_auc_score, f1_score, precision_score, recall_score
from src.tools.utils import load_config

# Load configuration at import time; every default argument of
# FineTuneTrainer below is resolved from this dict when the class
# statement executes, so ./config.yaml must exist relative to the CWD.
yaml_config = load_config("./config.yaml")
train_params = yaml_config["fine_tune_config"]
device = yaml_config["config"]["device"]  # presumably "cuda"/"cpu" — TODO confirm format


class FineTuneTrainer:
    """Supervised fine-tuning driver for a sequence-classification model.

    Responsibilities:
      * mixed-precision (AMP) training with optional gradient clipping,
      * per-epoch validation with accuracy / AUC / F1 / precision / recall,
      * LR scheduling via ReduceLROnPlateau driven by the validation AUC,
      * early stopping and best-checkpoint saving keyed on validation AUC,
      * structured CSV metric logging (for plotting) plus file/console logs,
      * final test-set evaluation using the best saved checkpoint.

    The model's forward is expected to accept ``input_ids``,
    ``attention_mask``, ``age``, ``interval``, ``same_event_interval`` and
    ``labels`` and to return a dict with ``"loss"`` and ``"logits"`` keys
    (see train_epoch / evaluate). Metrics assume binary classification
    (positive class = label 1); AUC falls back to NaN when it cannot be
    computed (e.g. a single class in the batch).
    """

    def __init__(self,
                 model: nn.Module,
                 train_loader: DataLoader,
                 valid_loader: DataLoader,
                 test_loader: DataLoader,
                 num_epochs: int = train_params["num_epochs"],
                 lr: float = float(train_params["lr"]),
                 weight_decay: float = float(train_params['weight_decay']),
                 scheduler: bool = train_params["scheduler"],
                 use_amp: bool = train_params["use_amp"],
                 grad_clip: float = train_params["grad_clip"],
                 checkpoint_path: str = train_params["checkpoint_path"],
                 logs_file: str = train_params["logs_file"],
                 ):
        self.model = model.to(device)
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader
        self.num_epochs = num_epochs
        self.lr = lr
        self.grad_clip = grad_clip
        self.checkpoint_path = checkpoint_path
        os.makedirs(checkpoint_path, exist_ok=True)

        # Log directory. NOTE: despite the name, `logs_file` is treated as a
        # directory path (files are created inside it below).
        self.logs_dir = logs_file
        os.makedirs(self.logs_dir, exist_ok=True)

        # Per-instance logger; id(self) keeps loggers of distinct trainer
        # instances separate so handlers are not shared between them.
        self.logger = logging.getLogger(f"FineTuneTrainer_{id(self)}")
        if not self.logger.handlers:  # guard against duplicate handlers on re-init
            self.logger.setLevel(logging.INFO)

            # File handler
            fh = logging.FileHandler(os.path.join(self.logs_dir, "fine_tune.log"))
            fh.setLevel(logging.INFO)
            # Console handler
            ch = logging.StreamHandler()
            ch.setLevel(logging.INFO)

            formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')
            fh.setFormatter(formatter)
            ch.setFormatter(formatter)

            self.logger.addHandler(fh)
            self.logger.addHandler(ch)

        # Optimizer
        self.optimizer = torch.optim.AdamW(model.parameters(), lr=self.lr, weight_decay=weight_decay)

        # LR scheduler: ReduceLROnPlateau monitored on the VALIDATION AUC
        # (train() calls scheduler.step(valid_auc)), hence mode='max' —
        # larger is better.
        if scheduler:
            self.scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
                self.optimizer,
                mode='max',           # monitored metric is valid AUC: higher is better
                factor=0.5,           # halve the learning rate on plateau
                patience=3,           # wait 3 epochs without improvement before reducing
                min_lr=1e-7           # lower bound for the learning rate
            )
        else:
            self.scheduler = None

        # Mixed precision. GradScaler is a no-op when enabled=False.
        self.use_amp = use_amp
        self.scaler = torch.cuda.amp.GradScaler(enabled=use_amp)

        # Training state. NOTE(review): best_loss and best_valid_acc are
        # never read anywhere in this class (selection uses best_valid_auc);
        # kept for attribute compatibility with external code.
        self.best_loss = float("inf")
        self.best_valid_acc = 0.0
        self.epochs_no_improve = 0
        self.best_valid_auc = -1.0
        self.early_stop_patience = train_params["early_stop_patience"]
        self.train_history = []

        # Structured CSV log (one row per epoch, for plotting).
        # Column names deliberately contain no spaces so downstream CSV
        # parsers get clean headers.
        self.csv_log_file = open(os.path.join(self.logs_dir, 'finetune_classification.csv'), "w")
        self.csv_log_file.write(
            "epoch,train_loss,valid_loss,lr,"
            "train_acc,train_auc,"
            "train_precision,train_recall,train_f1,"
            "valid_acc,valid_auc,"
            "valid_precision,valid_recall,"
            "valid_f1\n"
        )

    def _compute_metrics(self, labels, logits):
        """Compute binary-classification metrics from raw logits.

        Args:
            labels: 1-D tensor of integer class labels (on any device).
            logits: 2-D tensor of shape (N, num_classes).

        Returns:
            dict with 'acc', 'auc', 'f1', 'precision', 'recall' plus the
            raw 'preds' (argmax) and 'probs' (softmax) arrays.
        """
        probs = torch.softmax(logits, dim=-1).cpu().numpy()
        preds = np.argmax(probs, axis=1)
        labels = labels.cpu().numpy()

        try:
            # AUC over the positive-class probability; raises ValueError
            # when only one class is present.
            auc = roc_auc_score(labels, probs[:, 1])
        except ValueError:
            auc = float('nan')  # for multi-class, switch to ovr/ovo instead
        acc = accuracy_score(labels, preds)
        f1 = f1_score(labels, preds, average='binary', pos_label=1, zero_division=0)
        precision = precision_score(labels, preds, average='binary', pos_label=1, zero_division=0)
        recall = recall_score(labels, preds, average='binary', pos_label=1, zero_division=0)

        return {
            'acc': acc,
            'auc': auc,
            'f1': f1,
            'precision': precision,
            'recall': recall,
            'preds': preds,
            'probs': probs
        }

    def train_epoch(self, epoch):
        """Run one training epoch.

        Args:
            epoch: zero-based epoch index (used for logging only).

        Returns:
            (avg_loss, metrics_dict, current_lr) for the epoch.
        """
        self.model.train()
        total_loss = 0.0
        all_labels = []
        all_logits = []
        start_time = time.time()

        for batch_idx, batch in enumerate(self.train_loader):
            # Move batch tensors to the target device.
            # Assumes these dict keys are produced by the project's collate
            # function — TODO confirm against the dataset implementation.
            input_ids = batch['event_subtype'].to(device)
            attention_mask = batch['event_subtype_mask'].to(device)
            labels = batch['bcard_target'].to(device)
            age = batch['age'].to(device)
            interval = batch['interval'].to(device)
            same_event_interval = batch['same_event_interval'].to(device)

            with torch.cuda.amp.autocast(enabled=self.use_amp):
                outputs = self.model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    age=age,
                    interval=interval,
                    same_event_interval=same_event_interval,
                    labels=labels,
                )
                loss = outputs["loss"]
                logits = outputs["logits"]

            self.optimizer.zero_grad()
            self.scaler.scale(loss).backward()

            if self.grad_clip > 0:
                # Gradients must be unscaled before clipping under AMP.
                self.scaler.unscale_(self.optimizer)
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), self.grad_clip)

            self.scaler.step(self.optimizer)
            self.scaler.update()

            total_loss += loss.item()
            all_labels.append(labels.detach().cpu())
            all_logits.append(logits.detach().cpu())

            if batch_idx % 50 == 0:
                current_lr = self.optimizer.param_groups[0]['lr']
                self.logger.info(
                    f"Epoch {epoch + 1} | Batch {batch_idx}/{len(self.train_loader)} | "
                    f"Loss: {loss.item():.4f} | LR: {current_lr:.2e}"
                )

        # Concatenate all batches for epoch-level metrics.
        all_labels = torch.cat(all_labels, dim=0)
        all_logits = torch.cat(all_logits, dim=0)

        metrics = self._compute_metrics(all_labels, all_logits)
        avg_loss = total_loss / len(self.train_loader)
        epoch_time = time.time() - start_time
        current_lr = self.optimizer.param_groups[0]["lr"]

        self.logger.info(
            f"Epoch {epoch + 1} | Train Loss: {avg_loss:.4f} | Acc: {metrics['acc']:.4f} | "
            f"AUC: {metrics['auc']:.4f} | F1: {metrics['f1']:.4f} | "
            f"Precision: {metrics['precision']:.4f} | Recall: {metrics['recall']:.4f} | "
            f"LR: {current_lr:.6e} | Time: {epoch_time:.2f}s"
        )

        return avg_loss, metrics, current_lr

    @torch.no_grad()
    def evaluate(self, data_loader, prefix="Valid"):
        """Evaluate the model on a data loader (no gradient computation).

        Args:
            data_loader: loader yielding the same batch dict as training.
            prefix: label used in log lines ("Valid" / "Test").

        Returns:
            (avg_loss, metrics_dict).
        """
        self.model.eval()
        total_loss = 0.0
        all_labels = []
        all_logits = []

        for batch in data_loader:
            input_ids = batch['event_subtype'].to(device)
            labels = batch['bcard_target'].to(device)
            attention_mask = batch['event_subtype_mask'].to(device)
            age = batch['age'].to(device)
            interval = batch['interval'].to(device)
            same_event_interval = batch['same_event_interval'].to(device)

            outputs = self.model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                labels=labels,
                age=age,
                interval=interval,
                same_event_interval=same_event_interval,
            )
            loss = outputs["loss"]
            logits = outputs["logits"]

            total_loss += loss.item()
            all_labels.append(labels.detach().cpu())
            all_logits.append(logits.detach().cpu())

        all_labels = torch.cat(all_labels, dim=0)
        all_logits = torch.cat(all_logits, dim=0)

        metrics = self._compute_metrics(all_labels, all_logits)
        avg_loss = total_loss / len(data_loader)

        self.logger.info(
            f"{prefix} Loss: {avg_loss:.4f} | {prefix} Acc: {metrics['acc']:.4f} | "
            f"{prefix} AUC: {metrics['auc']:.4f} | {prefix} F1: {metrics['f1']:.4f} | "
            f"{prefix} Precision: {metrics['precision']:.4f} | {prefix} Recall: {metrics['recall']:.4f}"
        )

        return avg_loss, metrics

    def test(self):
        """Evaluate on the test set using the best saved checkpoint.

        Falls back to the current in-memory weights (with a warning) if no
        checkpoint exists. Writes scalar metrics to test_results.txt and
        returns (test_loss, test_metrics).
        """
        ckpt_path = os.path.join(self.checkpoint_path, "best_finetune_model.pth")
        if os.path.exists(ckpt_path):
            # NOTE(security): weights_only=False unpickles arbitrary objects;
            # only load checkpoints produced by this trainer / trusted code.
            checkpoint = torch.load(ckpt_path, map_location=device, weights_only=False)
            # Unwrap DataParallel/DDP if present.
            model_to_load = self.model.module if hasattr(self.model, 'module') else self.model
            model_to_load.load_state_dict(checkpoint["model_state_dict"])
            self.logger.info(f"Loaded best model from {ckpt_path}")
        else:
            self.logger.warning("Best model not found, using current model.")
        self.logger.info("Evaluating on test set...")
        test_loss, test_metrics = self.evaluate(self.test_loader, prefix="Test")

        # Persist the scalar metrics (skip the raw preds/probs arrays).
        result_file = os.path.join(self.logs_dir, "test_results.txt")
        with open(result_file, "w") as f:
            f.write("Test Results:\n")
            for k, v in test_metrics.items():
                if k in ['acc', 'auc', 'f1', 'precision', 'recall']:
                    f.write(f"{k.upper()}: {v:.4f}\n")

        self.logger.info(f"Test results saved to {result_file}")
        return test_loss, test_metrics

    def train(self):
        """Full fine-tuning loop: train/validate per epoch, schedule the LR,
        early-stop on validation AUC, then run the final test evaluation.

        Always closes the CSV file and logger handlers, even on error;
        exceptions are logged and re-raised.
        """
        try:
            self.logger.info(f"Starting Fine-Tuning on {device} ...")
            self.logger.info(
                f"Train: {len(self.train_loader.dataset)} | Valid: {len(self.valid_loader.dataset)} | Test: {len(self.test_loader.dataset)}")

            for epoch in range(self.num_epochs):
                train_loss, train_metrics, current_lr = self.train_epoch(epoch)
                valid_loss, valid_metrics = self.evaluate(self.valid_loader, prefix="Valid")

                # Append one CSV row per epoch. Values are written without
                # padding spaces so the columns parse cleanly.
                self.csv_log_file.write(
                    f"{epoch + 1},{train_loss:.6f},{valid_loss:.6f},{current_lr:.6e},"
                    f"{train_metrics['acc']:.6f},{train_metrics['auc']:.6f},"
                    f"{train_metrics['precision']:.6f},{train_metrics['recall']:.6f},{train_metrics['f1']:.6f},"
                    f"{valid_metrics['acc']:.6f},{valid_metrics['auc']:.6f},"
                    f"{valid_metrics['precision']:.6f},{valid_metrics['recall']:.6f},"
                    f"{valid_metrics['f1']:.6f}\n"
                )
                self.csv_log_file.flush()

                # Model selection / early-stop bookkeeping, keyed on valid AUC.
                if valid_metrics['auc'] > self.best_valid_auc:
                    self.best_valid_auc = valid_metrics['auc']
                    self.epochs_no_improve = 0

                    # Save the best checkpoint (unwrap DataParallel/DDP first).
                    model_to_save = self.model.module if hasattr(self.model, 'module') else self.model
                    checkpoint = {
                        "epoch": epoch + 1,
                        "model_state_dict": model_to_save.state_dict(),
                        "optimizer_state_dict": self.optimizer.state_dict(),
                        "scaler_state_dict": self.scaler.state_dict(),
                        "valid_auc": valid_metrics['auc'],
                        "valid_acc": valid_metrics['acc'],
                    }
                    torch.save(checkpoint, os.path.join(self.checkpoint_path, "best_finetune_model.pth"))
                    self.logger.info(f"Saved best model (Valid AUC: {valid_metrics['auc']:.4f})")
                else:
                    self.epochs_no_improve += 1
                    self.logger.info(
                        f"No improvement for {self.epochs_no_improve} epochs. Best Valid AUC: {self.best_valid_auc:.4f}")

                # Step ReduceLROnPlateau with the monitored metric (valid AUC).
                if self.scheduler is not None:
                    self.scheduler.step(valid_metrics['auc'])

                # Early stopping.
                if self.epochs_no_improve >= self.early_stop_patience:
                    self.logger.info(f"Early stopping at epoch {epoch + 1}")
                    break

            # Final test-set evaluation with the best checkpoint.
            self.test()

        except Exception as e:
            self.logger.error(f"Fine-tuning interrupted: {str(e)}", exc_info=True)
            raise
        finally:
            if hasattr(self, 'csv_log_file') and not self.csv_log_file.closed:
                self.csv_log_file.close()
            for handler in self.logger.handlers[:]:
                handler.close()
                self.logger.removeHandler(handler)
            self.logger.info("Fine-tuning completed.")

    def __del__(self):
        # Safety net: close the CSV file if train() never ran to completion.
        if hasattr(self, 'csv_log_file') and not self.csv_log_file.closed:
            self.csv_log_file.close()