import logging
import os
import time
from typing import Optional, List, Union

import numpy as np
import torch
from torch import nn, Tensor
from torch.optim.optimizer import Optimizer
from torch.optim.lr_scheduler import MultiStepLR, OneCycleLR
from torch.utils.data import DataLoader, TensorDataset
from torch.optim import Adam, AdamW

# Optional progress bar
try:
    from tqdm import tqdm as _tqdm
except Exception:
    def _tqdm(x, **kwargs):
        return x

from src.utils.logging import get_logger
from src.utils import metrics as mc
from src.utils.metrics import masked_mae
from src.base.sampler import RandomSampler, CutEdgeSampler

import pandas as pd
import wandb
from torch.cuda.amp import autocast, GradScaler


class BaseTrainer:
    """
    Base class for trainers in the DeepPA project.

    Handles device placement, optimizer/scheduler construction, AMP scaling,
    optional Weights & Biases logging, and shared train/eval utilities.

    Args:
        model (nn.Module): The neural network model.
        adj_mat: The adjacency matrix.
        filter_type (str): The type of filter.
        data: The training data (dict with loaders and scalers).
        aug (float): The augmentation factor; a RandomSampler is created when > 0.
        base_lr (float): The base learning rate.
        steps: The milestones for MultiStepLR learning rate decay.
        lr_decay_ratio: The learning rate decay ratio; 1 disables MultiStepLR.
        log_dir (str): The directory for logging (also used as checkpoint dir).
        n_exp (int): The experiment number.
        wandb_flag (bool, optional): Flag for using Weights & Biases logging. Defaults to True.
        wandb_mode (str, optional): "offline" sets WANDB_MODE=offline. Defaults to "offline".
        wandb_project (str, optional): W&B project name. Defaults to "DeepPA".
        wandb_run_name (Optional[str], optional): W&B run name. Defaults to None.
        wandb_dir (Optional[str], optional): Directory for W&B files. Defaults to None.
        save_iter (int, optional): The iteration interval for saving the model. Defaults to 300.
        clip_grad_value (Optional[float], optional): The maximum gradient value for gradient clipping. Defaults to None.
        max_epochs (Optional[int], optional): The maximum number of epochs. Defaults to 1000.
        patience (Optional[int], optional): The patience for early stopping. Defaults to 1000.
        device (Optional[Union[torch.device, str]], optional): The device for training and evaluation. Defaults to None.
        weight_decay (float, optional): Weight decay applied to decayable parameter groups. Defaults to 0.0.
        optimizer (str, optional): "Adam" or "AdamW". Defaults to "Adam".
        lr_scheduler (str, optional): "multistep" or "onecycle". Defaults to "multistep".
        onecycle_pct_start (float, optional): OneCycleLR warmup fraction. Defaults to 0.1.
        onecycle_anneal_strategy (str, optional): OneCycleLR anneal strategy. Defaults to "cos".
        onecycle_max_lr_multiplier (float, optional): max_lr = base_lr * multiplier. Defaults to 10.0.
        amp (bool, optional): Enable CUDA mixed-precision training. Defaults to False.
        accum_steps (int, optional): Gradient accumulation steps (clamped to >= 1). Defaults to 1.
        resume_from (Optional[str], optional): Checkpoint path to resume from. Defaults to None.
        auto_resume (bool, optional): Automatically resume when possible. Defaults to False.
        group_eval (bool, optional): Compute per-group metrics at test time. Defaults to False.
        node_groups (Optional[dict], optional): Mapping used by grouped evaluation. Defaults to None.
        mc_eval (bool, optional): Enable Monte Carlo (dropout) evaluation. Defaults to False.
        mc_samples (int, optional): Number of MC forward passes per batch. Defaults to 20.
        mc_alphas (Optional[List[float]], optional): Confidence levels for predictive
            intervals; also accepts a comma-separated string. Defaults to [0.8, 0.9, 0.95].
        export_mc_artifacts (bool, optional): Save MC mean/std/interval arrays. Defaults to False.
    """

    def __init__(
        self,
        model: nn.Module,
        adj_mat,
        filter_type: str,
        data,
        aug: float,
        base_lr: float,
        steps,
        lr_decay_ratio,
        log_dir: str,
        n_exp: int,
        wandb_flag: bool = True,
        wandb_mode: str = "offline",
        wandb_project: str = "DeepPA",
        wandb_run_name: Optional[str] = None,
        wandb_dir: Optional[str] = None,
        save_iter: int = 300,
        clip_grad_value: Optional[float] = None,
        max_epochs: Optional[int] = 1000,
        patience: Optional[int] = 1000,
        device: Optional[Union[torch.device, str]] = None,
        weight_decay: float = 0.0,
        optimizer: str = "Adam",
        lr_scheduler: str = "multistep",
        onecycle_pct_start: float = 0.1,
        onecycle_anneal_strategy: str = "cos",
        onecycle_max_lr_multiplier: float = 10.0,
        # new params
        amp: bool = False,
        accum_steps: int = 1,
        resume_from: Optional[str] = None,
        auto_resume: bool = False,
        group_eval: bool = False,
        node_groups: Optional[dict] = None,
        mc_eval: bool = False,
        mc_samples: int = 20,
        mc_alphas: Optional[List[float]] = None,
        export_mc_artifacts: bool = False,
     ):
        super().__init__()

        self._logger = get_logger(
            log_dir, __name__, "info_{}.log".format(n_exp), level=logging.INFO
        )
        # Resolve the target device: fall back to CUDA when available, else CPU.
        if device is None:
            print(
                "`device` is missing, try to train and evaluate the model on default device."
            )
            if torch.cuda.is_available():
                print("cuda device is available, place the model on the device.")
                self._device = torch.device("cuda")
            else:
                print("cuda device is not available, place the model on cpu.")
                self._device = torch.device("cpu")
        else:
            if isinstance(device, torch.device):
                self._device = device
            else:
                self._device = torch.device(device)

        self._model = model
        self._wandb_flag = wandb_flag
        self._wandb_inited = False
        self.model.to(self._device)
        # NOTE(review): assumes the model exposes `param_num(name)` and a
        # `name` attribute (project convention) — confirm against model base.
        self.num_param = self.model.param_num(self.model.name)
        # sentinel marking missing values in the labels
        self.nan_val = -1

        self._logger.info("the number of parameters: {}".format(self.num_param))

        self._adj_mat = adj_mat
        self._filter_type = filter_type
        self._aug = aug
        self._loss_fn = masked_mae
        self._base_lr = base_lr
        self._weight_decay = weight_decay
        self._optimizer = self._init_optimizer(optimizer)
        self._lr_decay_ratio = lr_decay_ratio
        self._steps = steps
        self._lr_scheduler_type = lr_scheduler
        self._onecycle_pct_start = onecycle_pct_start
        self._onecycle_anneal_strategy = onecycle_anneal_strategy
        self._onecycle_max_lr_multiplier = onecycle_max_lr_multiplier
        if self._lr_scheduler_type == "multistep":
            # lr_decay_ratio == 1 means "no decay": skip the scheduler entirely
            if lr_decay_ratio == 1:
                self._lr_scheduler = None
            else:
                self._lr_scheduler = MultiStepLR(self.optimizer, steps, gamma=lr_decay_ratio)
        else:
            # OneCycleLR will be initialized in train() when total steps are known
            self._lr_scheduler = None
        self._clip_grad_value = clip_grad_value
        self._max_epochs = max_epochs
        self._patience = patience
        self._save_iter = save_iter
        self._save_path = log_dir
        self._n_exp = n_exp
        self._data = data
        self._supports = None
        # AMP & accumulation & resume
        self._amp = bool(amp)
        self._accum_steps = max(1, int(accum_steps))  # clamp to at least 1
        self._resume_from = resume_from
        self._auto_resume = bool(auto_resume)
        self._group_eval = bool(group_eval)
        self._node_groups = node_groups or None
        self._mc_eval = bool(mc_eval)
        self._mc_samples = int(mc_samples)
        # MC alphas for predictive intervals (robust parsing)
        try:
            if mc_alphas is None:
                self._mc_alphas = [0.8, 0.9, 0.95]
            elif isinstance(mc_alphas, (list, tuple)):
                self._mc_alphas = [float(a) for a in mc_alphas]
            else:
                # accept a comma-separated string such as "0.8,0.9"
                self._mc_alphas = [float(x) for x in str(mc_alphas).split(",") if str(x).strip()]
        except Exception:
            self._mc_alphas = [0.8, 0.9, 0.95]
        self._export_mc_artifacts = bool(export_mc_artifacts)

        # detect CUDA for AMP scaler
        use_cuda = False
        try:
            use_cuda = (isinstance(self._device, torch.device) and self._device.type == "cuda")
        except Exception:
            use_cuda = torch.cuda.is_available()
        # GradScaler stays disabled (no-op) unless AMP is requested on CUDA
        self._scaler = GradScaler(enabled=(self._amp and use_cuda))
        self._supports = None

        if aug > 0:
            self._sampler = RandomSampler(adj_mat, filter_type)

        # Initialize wandb after core fields are set
        if self._wandb_flag:
            try:
                if wandb_mode and wandb_mode.lower() == "offline":
                    os.environ["WANDB_MODE"] = "offline"
                if wandb_dir:
                    os.environ["WANDB_DIR"] = str(wandb_dir)
                wandb.init(
                    project=wandb_project,
                    name=wandb_run_name,
                    dir=wandb_dir,
                    config={
                        "base_lr": self._base_lr,
                        "weight_decay": self._weight_decay,
                        "optimizer": optimizer,
                        "lr_scheduler": self._lr_scheduler_type,
                        "onecycle_pct_start": self._onecycle_pct_start,
                        "onecycle_anneal_strategy": self._onecycle_anneal_strategy,
                        "onecycle_max_lr_multiplier": self._onecycle_max_lr_multiplier,
                        "max_epochs": self._max_epochs,
                        "batch_size": None,
                        "aug": self._aug,
                        "amp": self._amp,
                        "accum_steps": self._accum_steps,
                        "group_eval": self._group_eval,
                        "mc_eval": self._mc_eval,
                        "mc_samples": self._mc_samples,
                        "mc_alphas": self._mc_alphas,
                    },
                )
                wandb.run.summary["Params"] = self.num_param
                # Sync local CHANGELOG.md to wandb summary for teacher briefing
                self._sync_changelog_to_wandb()
                self._wandb_inited = True
            except Exception as e:
                # wandb failures must never block training
                self._logger.warning(f"wandb init failed: {e}")
                self._wandb_inited = False
    def _sync_changelog_to_wandb(self, changelog_path: str = "CHANGELOG.md", max_chars: int = 20000):
        """
        Push the local CHANGELOG.md content into the wandb run summary.

        Args:
            changelog_path: Path to the CHANGELOG file relative to cwd.
            max_chars: Upper bound on pushed characters (keeps the tail).
        """
        # No-op unless wandb was requested AND successfully initialized.
        if not (self._wandb_flag and self._wandb_inited):
            return
        try:
            fullpath = os.path.abspath(changelog_path)
            if not os.path.exists(fullpath):
                wandb.run.summary["changelog_synced"] = False
                return
            with open(fullpath, "r", encoding="utf-8") as f:
                content = f.read()
            # Keep only the most recent tail when the file is oversized.
            if len(content) > max_chars:
                content = "...\n" + content[-max_chars:]
            wandb.run.summary["changelog"] = content
            wandb.run.summary["changelog_synced"] = True
        except Exception as e:
            # Never crash training over a changelog sync; just warn.
            self._logger.warning(f"sync changelog to wandb failed: {e}")

    @property
    def model(self):
        """The wrapped nn.Module being trained."""
        return self._model

    @property
    def supports(self):
        """Graph supports passed to the model's forward (None by default)."""
        return self._supports

    @property
    def data(self):
        """Data dict holding loaders ("train_loader", ...) and "scalers"."""
        return self._data

    @property
    def logger(self):
        """Experiment logger writing to info_{n_exp}.log."""
        return self._logger

    @property
    def optimizer(self):
        """The torch optimizer built by _init_optimizer."""
        return self._optimizer

    @property
    def lr_scheduler(self):
        """The LR scheduler, or None when disabled / not yet initialized."""
        return self._lr_scheduler

    @property
    def loss_fn(self):
        """Training loss callable (masked MAE)."""
        return self._loss_fn

    @property
    def device(self):
        """torch.device used for training and evaluation."""
        return self._device

    @property
    def save_path(self):
        """Directory for checkpoints, results and exported artifacts."""
        return self._save_path

    def _check_device(self, tensors: Union[Tensor, List[Tensor]]):
        if isinstance(tensors, list):
            # 优先使用 non_blocking 以提升 H2D 搬运性能
            return [tensor.to(self._device, non_blocking=True) for tensor in tensors]
        else:
            return tensors.to(self._device, non_blocking=True)

    def _inverse_transform(self, tensors: Union[Tensor, List[Tensor]]):
        """Map normalized tensors back to data scale via self.data["scalers"].

        WARNING: the inverse transform is written *in place* into the first
        `n_output_dim` channels of the last dimension — the argument tensors
        are mutated and also returned. Applying this twice to the same tensor
        compounds the transform; callers must not re-pass transformed tensors.
        """
        # only the first output channel is de-normalized
        n_output_dim = 1

        def inv(tensor, scalers):
            # in-place per-channel inverse transform
            for i in range(n_output_dim):
                tensor[..., i] = scalers[i].inverse_transform(tensor[..., i])
            return tensor

        if isinstance(tensors, list):
            return [inv(tensor, self.data["scalers"]) for tensor in tensors]
        else:
            return inv(tensors, self.data["scalers"])

    def _to_numpy(self, tensors: Union[Tensor, List[Tensor]]):
        if isinstance(tensors, list):
            return [tensor.cpu().detach().numpy() for tensor in tensors]
        else:
            return tensors.cpu().detach().numpy()

    def _to_tensor(self, nparray):
        if isinstance(nparray, list):
            return [Tensor(array) for array in nparray]
        else:
            return Tensor(nparray)

    def save_model(self, epoch, save_path, n_exp):
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        filename = "final_model_{}.pt".format(n_exp)
        fullpath = os.path.join(save_path, filename)
        torch.save(self.model.state_dict(), fullpath)
        return fullpath

    def load_model(self, epoch, save_path, n_exp):
        filename = "final_model_{}.pt".format(n_exp)
        fullpath = os.path.join(save_path, filename)
        # Gracefully skip if checkpoint not found
        if not os.path.exists(fullpath):
            try:
                self._logger.warning(f"Final model checkpoint not found: {fullpath}")
            except Exception:
                print(f"Final model checkpoint not found: {fullpath}")
            return None
        try:
            state = torch.load(fullpath, map_location=self.device)
            self.model.load_state_dict(state)
            return fullpath
        except Exception as e:
            try:
                self._logger.warning(f"Failed to load final model from {fullpath}: {e}")
            except Exception:
                print(f"Failed to load final model from {fullpath}: {e}")
            return None

    def save_best_model(self, epoch, save_path, n_exp):
        if not os.path.exists(save_path):
            os.makedirs(save_path)
        filename = "best_model_{}.pt".format(n_exp)
        fullpath = os.path.join(save_path, filename)
        torch.save(self.model.state_dict(), fullpath)
        return fullpath

    def load_best_model(self, epoch, save_path, n_exp):
        filename = "best_model_{}.pt".format(n_exp)
        fullpath = os.path.join(save_path, filename)
        # Gracefully skip if checkpoint not found
        if not os.path.exists(fullpath):
            try:
                self._logger.warning(f"Best model checkpoint not found: {fullpath}")
            except Exception:
                print(f"Best model checkpoint not found: {fullpath}")
            return None
        try:
            state = torch.load(fullpath, map_location=self.device)
            self.model.load_state_dict(state)
            return fullpath
        except Exception as e:
            try:
                self._logger.warning(f"Failed to load best model from {fullpath}: {e}")
            except Exception:
                print(f"Failed to load best model from {fullpath}: {e}")
            return None

    def test_batch(self, X, label):
        """Forward one batch and map prediction/label back to data scale.

        NOTE: `_inverse_transform` writes into its arguments in place, so the
        passed-in `label` tensor is mutated by this call; do not call it twice
        with the same label tensor.
        """
        pred = self.model(X, self.supports)
        pred, label = self._inverse_transform([pred, label])
        return pred, label

    def test(self, epoch, mode="test"):
        """Evaluate the best (else final) checkpoint on the `mode` split.

        With `mc_eval` enabled the model stays in train mode so dropout is
        active, `mc_samples` stochastic forward passes are drawn per batch,
        and coverage / interval-width / NLL / quantile-calibration statistics
        are accumulated (and pushed to wandb when enabled).

        Args:
            epoch: Epoch index forwarded to the checkpoint loaders.
            mode: Split name; `data[f"{mode}_loader"]` must exist.
        """
        # Prefer best model; fallback to final (skip gracefully if none)
        loaded = self.load_best_model(epoch, self.save_path, self._n_exp)
        if not loaded:
            loaded = self.load_model(epoch, self.save_path, self._n_exp)

        labels = []
        preds = []
        with torch.no_grad():
            if self._mc_eval:
                # keep dropout active for Monte Carlo sampling
                self.model.train()
                total_std_sum = 0.0
                total_count = 0
                labels = []
                preds = []
                uncerts = []
                alphas = (self._mc_alphas if isinstance(self._mc_alphas, (list, tuple)) else [0.8, 0.9, 0.95])
                coverage_counts = {a: 0 for a in alphas}
                coverage_totals = {a: 0 for a in alphas}
                width_sums = {a: 0.0 for a in alphas}
                width_counts = {a: 0 for a in alphas}
                intervals_low = {a: [] for a in alphas}
                intervals_high = {a: [] for a in alphas}
                # nominal quantile levels implied by the interval alphas
                p_list = sorted(set([0.5] + [ (1.0 - a) / 2.0 for a in alphas ] + [ 1.0 - (1.0 - a) / 2.0 for a in alphas ]))
                q_below_counts = {p: 0 for p in p_list}
                q_totals = {p: 0 for p in p_list}
                nll_sum = 0.0
                nll_count = 0
                eps = 1e-6
                for _, (X, label) in enumerate(_tqdm(self.data[mode + "_loader"], desc=mode.capitalize(), leave=False)):
                    X, label = self._check_device([X, label])
                    # Monte Carlo sampling.
                    # BUGFIX: `test_batch` inverse-transforms `label` in place,
                    # so passing the same tensor S times re-applied the inverse
                    # transform S times before the label was used for coverage,
                    # NLL and metrics. Feed a clone per sample and keep the
                    # first (single-pass) transformed label, matching the
                    # non-MC path below.
                    mc_preds = []
                    label_inv = None
                    for _ in range(max(1, self._mc_samples)):
                        pred, lbl = self.test_batch(X, label.clone())
                        if label_inv is None:
                            label_inv = lbl
                        mc_preds.append(pred.cpu().unsqueeze(0))
                    mc_preds = torch.cat(mc_preds, dim=0)  # [S, B, H, N, C]
                    mean_pred = mc_preds.mean(dim=0)
                    std_pred = mc_preds.std(dim=0)
                    uncerts.append(std_pred.cpu())
                    total_std_sum += float(std_pred.mean().item())
                    total_count += 1
                    # mask valid entries
                    lbl_cpu = label_inv.cpu()
                    valid = (lbl_cpu != self.nan_val)
                    # NLL under Gaussian approximation
                    var_pred = std_pred**2 + eps
                    nll = 0.5 * torch.log(2.0 * torch.tensor(np.pi)) + 0.5 * torch.log(var_pred) + 0.5 * ((lbl_cpu - mean_pred)**2 / var_pred)
                    nll_sum += float(nll[valid].sum().item())
                    nll_count += int(valid.sum().item())
                    # Predictive intervals and coverage/width
                    for a in alphas:
                        pl = (1.0 - a) / 2.0
                        ph = 1.0 - pl
                        q_low = torch.quantile(mc_preds, pl, dim=0)
                        q_high = torch.quantile(mc_preds, ph, dim=0)
                        # collect intervals for optional artifact export
                        intervals_low[a].append(q_low.cpu())
                        intervals_high[a].append(q_high.cpu())
                        covered = (lbl_cpu >= q_low) & (lbl_cpu <= q_high) & valid
                        coverage_counts[a] += int(covered.sum().item())
                        coverage_totals[a] += int(valid.sum().item())
                        width = (q_high - q_low)
                        width_sums[a] += float(width[valid].sum().item())
                        width_counts[a] += int(valid.sum().item())
                    # Quantile ECE (empirical freq vs nominal p)
                    for p in p_list:
                        q_p = torch.quantile(mc_preds, p, dim=0)
                        below = (lbl_cpu <= q_p) & valid
                        q_below_counts[p] += int(below.sum().item())
                        q_totals[p] += int(valid.sum().item())
                    labels.append(lbl_cpu)
                    preds.append(mean_pred)
                # export artifacts if requested
                if self._export_mc_artifacts:
                    try:
                        os.makedirs(self.save_path, exist_ok=True)
                        labels_np = torch.cat(labels, dim=0).numpy()
                        mean_np = torch.cat(preds, dim=0).numpy()
                        std_np = torch.cat(uncerts, dim=0).numpy()
                        np.save(os.path.join(self.save_path, f"mc_labels_{self._n_exp}.npy"), labels_np)
                        np.save(os.path.join(self.save_path, f"mc_mean_{self._n_exp}.npy"), mean_np)
                        np.save(os.path.join(self.save_path, f"mc_std_{self._n_exp}.npy"), std_np)
                        for a in alphas:
                            low_np = torch.cat(intervals_low[a], dim=0).numpy()
                            high_np = torch.cat(intervals_high[a], dim=0).numpy()
                            np.save(os.path.join(self.save_path, f"mc_int_low@{int(a*100)}_{self._n_exp}.npy"), low_np)
                            np.save(os.path.join(self.save_path, f"mc_int_high@{int(a*100)}_{self._n_exp}.npy"), high_np)
                        self._logger.info("MC artifacts saved: mean/std/intervals")
                    except Exception as e:
                        self._logger.warning(f"Saving MC artifacts failed: {e}")
                # restore eval mode
                self.model.eval()
            else:
                self.model.eval()
                for _, (X, label) in enumerate(_tqdm(self.data[mode + "_loader"], desc=mode.capitalize(), leave=False)):
                    X, label = self._check_device([X, label])
                    pred, label = self.test_batch(X, label)
                    labels.append(label.cpu())
                    preds.append(pred.cpu())

        labels = torch.cat(labels, dim=0)
        preds = torch.cat(preds, dim=0)
        # overall metrics across all horizons
        metrics = mc.compute_all_metrics(preds, labels, self.nan_val)
        log = "====Using evaluate function for test data -- Test MAE: {:.4f}, Test RMSE: {:.4f}===="
        print(log.format(metrics[0], metrics[1]))
        # Log MC uncertainty summary if enabled
        if self._mc_eval and self._wandb_flag and self._wandb_inited:
            try:
                wandb.run.summary["test/uncertainty_mean_std"] = float(total_std_sum / max(1, total_count))
                # coverage and interval width per alpha
                for a in self._mc_alphas:
                    cov_rate = float(coverage_counts.get(a, 0) / max(1, coverage_totals.get(a, 0)))
                    mean_width = float(width_sums.get(a, 0.0) / max(1, width_counts.get(a, 0)))
                    wandb.run.summary[f"test/mc/coverage@{int(a*100)}%"] = cov_rate
                    wandb.run.summary[f"test/mc/interval_width@{int(a*100)}%"] = mean_width
                # NLL average under Gaussian approx
                wandb.run.summary["test/mc/nll"] = float(nll_sum / max(1, nll_count))
                # quantile calibration and ECE
                ece_vals = []
                for p in p_list:
                    empirical = float(q_below_counts.get(p, 0) / max(1, q_totals.get(p, 0)))
                    wandb.run.summary[f"test/mc/calibration@p={p:.2f}"] = empirical
                    ece_vals.append(abs(empirical - float(p)))
                if len(ece_vals) > 0:
                    wandb.run.summary["test/mc/quantile_ece_mean"] = float(np.mean(ece_vals))
            except Exception:
                pass
        # grouped evaluation for test
        if self._group_eval and isinstance(self._node_groups, dict):
            try:
                grouped = mc.compute_grouped_metrics(preds, labels, self._node_groups, self.nan_val)
                for g, stats in grouped.items():
                    print(f"Group {g} Test MAE: {float(stats.get('MAE', np.nan)):.4f}, RMSE: {float(stats.get('RMSE', np.nan)):.4f}")
                if self._wandb_flag and self._wandb_inited:
                    for g, stats in grouped.items():
                        wandb.run.summary[f"test/group/{g}/mae"] = float(stats.get("MAE", np.nan))
                        wandb.run.summary[f"test/group/{g}/rmse"] = float(stats.get("RMSE", np.nan))
            except Exception as e:
                self._logger.warning(f"Grouped test metrics failed: {e}")
        amae = []
        armse = []

        # per-horizon metrics
        for i in range(self.model.horizon):
            pred = preds[:, i : i + 1]
            real = labels[:, i : i + 1]
            metrics = mc.compute_all_metrics(pred, real, self.nan_val)
            log = "====Using evaluate function for test data -- Test MAE: {:.4f}, Test RMSE: {:.4f}===="
            print(log.format(metrics[0], metrics[1]))
            amae.append(metrics[0])
            armse.append(metrics[1])

        log = "On average over {} horizons, Average Test MAE: {:.4f}, Test RMSE: {:.4f}"
        print(log.format(self.model.horizon, np.mean(amae), np.mean(armse)))

        if self._wandb_flag and self._wandb_inited:
            wandb.run.summary["test MAE"] = np.mean(amae)
            wandb.run.summary["test RMSE"] = np.mean(armse)
            wandb.run.summary["test_mae_by_horizon"] = amae
            wandb.run.summary["test_rmse_by_horizon"] = armse
            self._sync_changelog_to_wandb()

        # 4-step (hourly) aggregates for the standard 12-step horizon
        if self.model.horizon == 12:
            amae_day = []
            armse_day = []

            for i in range(0, self.model.horizon, 4):
                pred = preds[:, i : i + 4]
                real = labels[:, i : i + 4]
                metrics = mc.compute_all_metrics(pred, real, self.nan_val)
                amae_day.append(metrics[0])
                armse_day.append(metrics[1])

            log = "0-3 (1h) Test MAE: {:.4f}, Test RMSE: {:.4f}"
            print(log.format(amae_day[0], armse_day[0]))
            log = "4-7 (2h) Test MAE: {:.4f}, Test RMSE: {:.4f}"
            print(log.format(amae_day[1], armse_day[1]))
            log = "8-11 (3h) Test MAE: {:.4f}, Test RMSE: {:.4f}"
            print(log.format(amae_day[2], armse_day[2]))

        # persist per-horizon results as CSV (row 0: MAE, row 1: RMSE)
        results = np.stack([amae, armse], axis=0)
        np.savetxt(
            os.path.join(self.save_path, "results_{}.csv".format(self._n_exp)),
            results,
            fmt="%.4f",
            delimiter=",",
        )

    def save_preds(self, epoch):
        """Dump inverse-transformed predictions and labels for every split.

        Writes {mode}_preds.npy and {mode}_labels.npy into `save_path` for
        train/val/test.

        BUGFIX: the loaders return None (they do not raise) when a checkpoint
        is missing, so the previous try/except fallback to the final model
        never fired. Use the same explicit fallback as `test()`.
        """
        loaded = self.load_best_model(epoch, self.save_path, self._n_exp)
        if not loaded:
            self.load_model(epoch, self.save_path, self._n_exp)

        for mode in ["train", "val", "test"]:
            labels = []
            preds = []
            with torch.no_grad():
                self.model.eval()
                for _, (X, label) in enumerate(_tqdm(self.data[mode + "_loader"], desc=f"Save {mode}", leave=False)):
                    X, label = self._check_device([X, label])
                    pred, label = self.test_batch(X, label)
                    labels.append(label.cpu())
                    preds.append(pred.cpu())
            labels = torch.cat(labels, dim=0)
            preds = torch.cat(preds, dim=0)
            # explicit .numpy() instead of relying on np.save's implicit
            # conversion of CPU tensors; inputs were previously accumulated
            # here too but never written out, so that dead work was dropped.
            np.save(os.path.join(self.save_path, mode + "_preds.npy"), preds.numpy())
            np.save(os.path.join(self.save_path, mode + "_labels.npy"), labels.numpy())

    def train(self):
        """
        Minimal training loop with optional OneCycleLR, AMP and gradient
        accumulation. OneCycleLR is initialized here (when selected and not
        preset) because total step count is only known once the loader exists;
        the scheduler is stepped only when the optimizer actually steps.

        Fixes over the previous version:
        - When ``len(train_loader)`` is not a multiple of ``accum_steps``, the
          leftover accumulated gradients are now flushed with a final step at
          the end of the epoch (they used to be discarded by the ``zero_grad``
          at the start of the next epoch).
        - ``train/loss`` now accumulates the *unscaled* per-batch loss; under
          accumulation the scaled loss under-reported the epoch mean by a
          factor of ``accum_steps``.

        Raises:
            ValueError: if ``data['train_loader']`` is missing.
        """
        train_loader = self.data.get("train_loader")
        val_loader = self.data.get("val_loader")
        if train_loader is None:
            raise ValueError("data['train_loader'] is required for training")

        self.model.train()
        total_batches = len(train_loader) * max(1, int(self._max_epochs or 1))
        # initialize OneCycleLR if selected and not already provided
        if (str(self._lr_scheduler_type).lower() == "onecycle") and (self._lr_scheduler is None):
            total_steps = max(1, total_batches // max(1, int(self._accum_steps)))
            try:
                self._lr_scheduler = OneCycleLR(
                    self.optimizer,
                    max_lr=float(self._base_lr) * float(self._onecycle_max_lr_multiplier),
                    total_steps=int(total_steps),
                    pct_start=float(self._onecycle_pct_start),
                    anneal_strategy=str(self._onecycle_anneal_strategy),
                )
            except Exception as e:
                try:
                    self._logger.warning(f"OneCycleLR init failed: {e}")
                except Exception:
                    print(f"OneCycleLR init failed: {e}")
                self._lr_scheduler = None

        def _optimizer_step():
            # Shared step logic: clip -> step -> scheduler -> zero_grad.
            if self._clip_grad_value is not None:
                try:
                    if self._amp:
                        # gradients must be unscaled before clipping under AMP
                        self._scaler.unscale_(self.optimizer)
                    torch.nn.utils.clip_grad_value_(self.model.parameters(), float(self._clip_grad_value))
                except Exception:
                    torch.nn.utils.clip_grad_norm_(self.model.parameters(), float(self._clip_grad_value))
            if self._amp:
                self._scaler.step(self.optimizer)
                self._scaler.update()
            else:
                self.optimizer.step()
            # step scheduler only when the optimizer steps
            if self._lr_scheduler is not None:
                try:
                    self._lr_scheduler.step()
                except Exception:
                    # e.g. OneCycleLR raises when stepped past total_steps
                    pass
            self.optimizer.zero_grad(set_to_none=True)

        best_val = float("inf")
        for epoch in range(max(1, int(self._max_epochs or 1))):
            running_loss = 0.0
            pending = 0  # micro-batches accumulated since the last step
            self.optimizer.zero_grad(set_to_none=True)
            for bidx, (X, label) in enumerate(_tqdm(train_loader, desc=f"Train {epoch+1}/{self._max_epochs}", leave=False)):
                X, label = self._check_device([X, label])
                with autocast(enabled=bool(self._amp)):
                    pred = self.model(X, self.supports)
                    loss = self._loss_fn(pred, label, self.nan_val)
                    # record the unscaled loss for logging; scale only the
                    # value used for backward under accumulation
                    running_loss += float(loss.detach().cpu().item())
                    if self._accum_steps > 1:
                        loss = loss / float(self._accum_steps)
                if self._amp:
                    self._scaler.scale(loss).backward()
                else:
                    loss.backward()
                pending += 1

                # take optimizer step when reaching accumulation boundary
                if pending >= int(self._accum_steps):
                    _optimizer_step()
                    pending = 0
            # flush leftover gradients of an incomplete accumulation window
            if pending > 0:
                _optimizer_step()

            # quick validation to track best
            if val_loader is not None:
                try:
                    self.model.eval()
                    val_loss = 0.0
                    count = 0
                    with torch.no_grad():
                        for Xv, Yv in val_loader:
                            Xv, Yv = self._check_device([Xv, Yv])
                            Pv = self.model(Xv, self.supports)
                            lv = self._loss_fn(Pv, Yv, self.nan_val)
                            val_loss += float(lv.cpu().item())
                            count += 1
                    val_loss = val_loss / max(1, count)
                    self.model.train()
                    if val_loss < best_val:
                        best_val = val_loss
                        try:
                            self.save_best_model(epoch, self.save_path, self._n_exp)
                        except Exception:
                            pass
                except Exception:
                    # never let a validation failure abort training
                    self.model.train()

            # wandb logging
            if self._wandb_flag and self._wandb_inited:
                try:
                    wandb.log({
                        "train/loss": running_loss / max(1, len(train_loader)),
                        "epoch": epoch,
                        "lr": (self._lr_scheduler.get_last_lr()[0] if self._lr_scheduler is not None else self.optimizer.param_groups[0]["lr"]),
                    })
                    self._sync_changelog_to_wandb()
                except Exception:
                    pass
        # save final model
        try:
            self.save_model(int(self._max_epochs or 1), self.save_path, self._n_exp)
        except Exception:
            pass

    def _init_optimizer(self, optimizer_name: str):
        # Build parameter groups: exclude bias and norm/bn parameters from weight decay
        decay_params = []
        no_decay_params = []
        for name, p in self.model.named_parameters():
            if not p.requires_grad:
                continue
            if p.ndim == 1 or name.endswith(".bias") or "bias" in name or "norm" in name or "bn" in name:
                no_decay_params.append(p)
            else:
                decay_params.append(p)
        param_groups = [
            {"params": decay_params, "weight_decay": self._weight_decay},
            {"params": no_decay_params, "weight_decay": 0.0},
        ]
        if str(optimizer_name).lower() == "adamw":
            return AdamW(param_groups, lr=self._base_lr)
        else:
            return Adam(param_groups, lr=self._base_lr)
