# -*- coding: utf-8 -*-
# @Time    : 2025/9/22 13:35
# @Author  : chenmh
# @File    : train.py
# @Desc: Model training code
import logging, gc
import numpy as np
import torch, os
from sympy.physics.units import current
from torch import nn, optim
from torch.utils.checkpoint import checkpoint
from transformers import get_linear_schedule_with_warmup
from torch.utils.data import Dataset, DataLoader
from tqdm import tqdm
from utils import setup_logging, calc_metrics, get_latest_pth_file
from typing import Tuple


class Trainer:
    """Drives training, validation (with early stopping), checkpointing and
    final testing of a binary-classification model.

    Each batch is expected to be a dict with keys ``item``, ``times``,
    ``static_vals`` and ``label``; the model is called as
    ``model(input_ids=..., times=..., static_features=...)`` and must return
    logits whose positive class sits at column index 1.
    """

    def __init__(self,
                 model: nn.Module,
                 criterion: nn.Module,
                 optimizer: optim.Optimizer,
                 scheduler: optim.lr_scheduler.LambdaLR,
                 train_loader: DataLoader,
                 valid_loader: DataLoader,
                 test_loader: DataLoader,
                 device: str,
                 num_epochs: int,
                 patience: int,
                 best_model_name: str,
                 save_path: str,
                 save_time: str,
                 logger: logging.Logger,
                 is_continued_train: bool,
                 continued_timestamp: str,
                 early_stop_metric: str,
                 ):
        """
        Args:
            model: network to train.
            criterion: loss module; assumed to use reduction="mean" — TODO confirm.
            optimizer: optimizer stepping ``model``'s parameters.
            scheduler: LR scheduler, stepped once per *batch* (warmup-style).
            train_loader: training batches.
            valid_loader: validation batches (drives early stopping).
            test_loader: test batches (evaluated once at the end).
            device: torch device string, e.g. "cuda" or "cpu".
            num_epochs: maximum number of training epochs.
            patience: epochs without validation improvement before early stop.
            best_model_name: file-name stem for checkpoints.
            save_path: checkpoint directory (created if missing).
            save_time: timestamp suffix for this run's checkpoint.
            logger: logger for progress / metric messages.
            is_continued_train: resume from an existing checkpoint if True.
            continued_timestamp: timestamp of the checkpoint to resume from;
                when empty, the most recent ``.pth`` file is used.
            early_stop_metric: "loss" (minimized) — any other value selects
                validation AUC (maximized) as the early-stopping criterion.
        """
        self.model = model
        self.criterion = criterion
        self.optimizer = optimizer
        self.scheduler = scheduler
        self.train_loader = train_loader
        self.valid_loader = valid_loader
        self.test_loader = test_loader
        self.device = device
        self.num_epochs = num_epochs
        self.patience = patience
        self.best_model_name = best_model_name
        self.save_path = save_path
        os.makedirs(self.save_path, exist_ok=True)
        self.save_time = save_time
        self.logger = logger
        self.model_save_path = f"{self.save_path}/{self.best_model_name}_{self.save_time}.pth"
        self.is_continued_train = is_continued_train
        self.continued_timestamp = continued_timestamp
        if self.is_continued_train:
            if not self.continued_timestamp:
                # No timestamp given: fall back to the newest .pth in save_path.
                self.checkpoint_path = f"{self.save_path}/{get_latest_pth_file()[0]}"
            else:
                self.checkpoint_path = f"{self.save_path}/{self.best_model_name}_{self.continued_timestamp}.pth"
        self.start_epoch = 1
        self.early_stop_metric = early_stop_metric.lower()

    def _forward_batch(self, batch) -> Tuple[torch.Tensor, torch.Tensor]:
        """Move one batch to the device, run the model, return (logits, label)."""
        item = batch["item"].to(self.device)
        times = batch["times"].to(self.device)
        static_vals = batch["static_vals"].to(self.device)
        # The model expects one (batch, 1) tensor per static feature,
        # not the single stacked tensor the loader yields.
        static_features = torch.split(static_vals, split_size_or_sections=1, dim=1)
        label = batch["label"].to(self.device)
        logits = self.model(input_ids=item, times=times, static_features=static_features)
        return logits, label

    def train_step(self, epoch: int) -> Tuple[float, float]:
        """Run one training epoch.

        Returns:
            Tuple of (average per-sample loss, epoch AUC from ``calc_metrics``).
        """
        self.model.train()
        running_loss = 0.0
        sample_count = 0
        all_preds, all_labels, all_probs = [], [], []
        # NOTE: the per-batch del/gc.collect()/torch.cuda.empty_cache() calls
        # were removed — they stall the CUDA pipeline on every batch and do not
        # lower peak memory usage.
        with tqdm(self.train_loader, desc=f"Epoch {epoch}/{self.num_epochs} (Train) ") as pbar:
            for batch in pbar:
                self.optimizer.zero_grad()
                logits, label = self._forward_batch(batch)
                loss = self.criterion(logits, label)
                loss.backward()
                torch.nn.utils.clip_grad_norm_(self.model.parameters(), 1.0)  # gradient clipping
                self.optimizer.step()
                self.scheduler.step()  # per-batch LR schedule (warmup-style)

                pred = logits.argmax(dim=1)
                prob_positive = logits.softmax(dim=1)[:, 1]  # P(positive class)
                all_preds.append(pred.detach().cpu())
                all_labels.append(label.detach().cpu())
                all_probs.append(prob_positive.detach().cpu())

                batch_size = label.size(0)
                # BUGFIX: weight each batch's mean loss by its size so the epoch
                # average is a true per-sample mean (the old code divided a sum
                # of batch *means* by the *sample* count).
                running_loss += loss.item() * batch_size
                sample_count += batch_size
                pbar.set_postfix({"Real-time train_loss": f"{loss.item():.4f}"})
        auc = calc_metrics(y_true=torch.cat(all_labels),
                           y_pred=torch.cat(all_preds),
                           y_prob=torch.cat(all_probs),
                           logger=self.logger)
        avg_train_loss = running_loss / max(sample_count, 1)  # guard empty loader
        return avg_train_loss, auc

    def _run_eval(self, loader: DataLoader, desc: str, desc_flag: str) -> Tuple[float, float]:
        """Shared evaluation loop (no parameter updates).

        Returns:
            Tuple of (average per-sample loss, AUC from ``calc_metrics``).
        """
        self.model.eval()
        running_loss = 0.0
        sample_count = 0
        all_preds, all_labels, all_probs = [], [], []
        # BUGFIX: evaluation previously ran without no_grad(), so autograd
        # graphs were built and memory/time wasted on every batch.
        with torch.no_grad(), tqdm(loader, desc=desc) as pbar:
            for batch in pbar:
                logits, label = self._forward_batch(batch)
                loss = self.criterion(logits, label)
                pred = logits.argmax(dim=1)
                prob_positive = logits.softmax(dim=1)[:, 1]  # P(positive class)
                all_preds.append(pred.detach().cpu())
                all_labels.append(label.detach().cpu())
                all_probs.append(prob_positive.detach().cpu())
                batch_size = label.size(0)
                # Same per-sample weighting fix as in train_step.
                running_loss += loss.item() * batch_size
                sample_count += batch_size
                pbar.set_postfix({f"Real-time {desc_flag}_loss": f"{loss.item():.4f}"})
        auc = calc_metrics(y_true=torch.cat(all_labels),
                           y_pred=torch.cat(all_preds),
                           y_prob=torch.cat(all_probs),
                           logger=self.logger)
        avg_loss = running_loss / max(sample_count, 1)  # guard empty loader
        return avg_loss, auc

    def evaluate_step(self, epoch: int, loader: DataLoader, desc_flag: str) -> Tuple[float, float]:
        """Evaluate ``loader`` during training; returns (avg loss, AUC)."""
        return self._run_eval(loader,
                              desc=f"Epoch {epoch}/{self.num_epochs} ({desc_flag}) ",
                              desc_flag=desc_flag)

    def test_step(self, loader: DataLoader, desc_flag: str) -> Tuple[float, float]:
        """Evaluate ``loader`` after training; returns (avg loss, AUC)."""
        return self._run_eval(loader, desc=f" ({desc_flag}) ", desc_flag=desc_flag)

    def train(self) -> None:
        """Full training loop with early stopping, then test the best model."""
        maximize = self.early_stop_metric != "loss"  # AUC-like metrics improve upward
        best_valid_metric = -np.inf if maximize else np.inf
        early_stop_counter = 0

        self.logger.info(f"开始训练，共{self.num_epochs}轮，设备：{self.device}")
        self.logger.info(f"早停耐心值：{self.patience},最优模型保存路径：{self.model_save_path}")
        self.logger.info("-" * 60)
        self.model.to(self.device)

        if self.is_continued_train:
            self.logger.info(f"开始进行断点续训，加载模型参数。。。")
            # NOTE(review): weights_only=False unpickles arbitrary objects —
            # only load checkpoints from trusted sources.
            checkpoint_state_dict = torch.load(self.checkpoint_path, weights_only=False)
            # BUGFIX: raise (assert is stripped under -O) and resume from the
            # epoch AFTER the checkpointed one — it has already been trained.
            if checkpoint_state_dict["epoch"] > self.num_epochs:
                raise ValueError("Checkpoint epoch exceeds num_epochs!")
            self.start_epoch = checkpoint_state_dict["epoch"] + 1
            self.model.load_state_dict(checkpoint_state_dict["model_state_dict"])
            self.optimizer.load_state_dict(checkpoint_state_dict["optimizer_state_dict"])
            # Older checkpoints may lack the scheduler state; skip gracefully.
            scheduler_state = checkpoint_state_dict.get("scheduler_state_dict")
            if scheduler_state is not None:
                self.scheduler.load_state_dict(scheduler_state)
            best_valid_metric = checkpoint_state_dict["best_valid_metric"]

        for epoch in range(self.start_epoch, self.num_epochs + 1):
            avg_train_loss, train_auc = self.train_step(epoch=epoch)
            avg_valid_loss, valid_auc = self.evaluate_step(epoch=epoch, loader=self.valid_loader, desc_flag="Valid")

            # Early stopping on the VALIDATION metric.
            # BUGFIX: the old code compared train_auc (not valid_auc) and always
            # minimized, which is backwards for AUC (higher is better).
            if self.early_stop_metric == "loss":
                current_valid_metric = avg_valid_loss
                improved = current_valid_metric < best_valid_metric
            else:
                current_valid_metric = valid_auc
                improved = current_valid_metric > best_valid_metric

            if improved:
                best_valid_metric = current_valid_metric
                early_stop_counter = 0
                # Persist the best model plus everything needed to resume.
                torch.save({
                    "epoch": epoch,
                    "model_state_dict": self.model.state_dict(),
                    "optimizer_state_dict": self.optimizer.state_dict(),
                    "scheduler_state_dict": self.scheduler.state_dict(),
                    "best_valid_metric": best_valid_metric,
                }, self.model_save_path)
            else:
                early_stop_counter += 1

            # Per-epoch summary.
            self.logger.info(f"\nEpoch {epoch} summary:")
            self.logger.info(f"训练集 - 损失：{avg_train_loss:.4f}")
            self.logger.info(f"验证集 - 损失：{avg_valid_loss:.4f}")
            self.logger.info(f"Train AUC: {train_auc:.4f} | Valid AUC: {valid_auc:.4f}")
            self.logger.info(f"早停计数器：{early_stop_counter}/{self.patience}")
            self.logger.info("-" * 60)

            if early_stop_counter >= self.patience:
                self.logger.info(f"早停机制触发！连续{self.patience}轮验证指标无提升，停止训练！")
                break

        # Reload the best checkpoint and report final test-set metrics.
        self.logger.info(f"\n训练结束，开始测试最优模型...")
        self.model.load_state_dict(
            torch.load(self.model_save_path, weights_only=False)["model_state_dict"]
        )
        avg_test_loss, test_auc = self.test_step(loader=self.test_loader, desc_flag="Test")

        self.logger.info(f"\n测试集最终指标：")
        self.logger.info(f"损失：{avg_test_loss:.4f}")
        self.logger.info(f"Test AUC: {test_auc:.4f}")
        self.logger.info(f"最优模型保存路径：{self.model_save_path}")
