# ====================================================================================
# 文件: src/main.py
# 描述: [V19 修复版] 修复 Logger Attribute Error，增强路径获取鲁棒性
# ====================================================================================

import os
import time
from typing import List
import torch
import torch.optim as optim
import logging
from parser import parse_args
from dataloader import load_data
from model import SDKR
from utils import get_logger, set_seed
from evaluate import test


def _ensure_top_k(config):
    if not hasattr(config, "top_k"):
        setattr(config, "top_k", [10, 20, 50])
    return config.top_k


def _format_metrics(results, top_k):
    metrics_str = "\n" + "=" * 60 + "\n"
    metrics_str += f"{'Metric':<12} | " + " | ".join([f"@{k:<3}" for k in top_k]) + "\n"
    metrics_str += "-" * 60 + "\n"
    for metric_name in ["recall", "ndcg", "precision", "hit"]:
        values = results[metric_name]
        row_str = f"{metric_name.capitalize():<12} | " + " | ".join([f"{v:.4f}" for v in values])
        metrics_str += row_str + "\n"
    metrics_str += "=" * 60
    return metrics_str


def apply_agm(model, agm_config, logger=None, step=0):
    """AGM core logic: monitor and balance gradient magnitudes across branches.

    Parameters whose names contain "user_embedding" or "item_embedding" form
    the ID branch; everything else is the semantic branch. When the ratio of
    semantic-to-ID gradient norms falls below the configured threshold, the ID
    gradients are scaled down by that ratio so neither branch dominates.

    Args:
        model: module whose named_parameters() exposes both branches.
        agm_config: dict with "enable" (bool, default False) and
            "threshold" (float, default 1.0).
        logger: optional logger; the ratio is reported every 100 steps.
        step: global training step, used only for the logging cadence.
    """
    if not agm_config.get("enable", False):
        return

    # Single pass over named_parameters() (the original iterated it twice);
    # only parameters that actually received gradients participate.
    id_params, sem_params = [], []
    for name, param in model.named_parameters():
        if param.grad is None:
            continue
        if "user_embedding" in name or "item_embedding" in name:
            id_params.append(param)
        else:
            sem_params.append(param)

    if not id_params or not sem_params:
        return

    id_norm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in id_params]))
    sem_norm = torch.norm(torch.stack([torch.norm(p.grad.detach()) for p in sem_params]))

    # Epsilon keeps the ratio finite when either branch's gradients vanish.
    epsilon = 1e-8
    ratio = (sem_norm + epsilon) / (id_norm + epsilon)
    threshold = agm_config.get("threshold", 1.0)

    if logger and step % 100 == 0:
        logger.info(f"[AGM Step {step}] Ratio: {ratio:.4f} (ID: {id_norm:.4f}, Sem: {sem_norm:.4f})")

    # ratio < threshold means ID gradients dominate: shrink them in place.
    if ratio < threshold:
        for p in id_params:
            p.grad *= ratio


def train(model, data, config, logger):
    """Run the full training loop: warmup scheduling, optional AGM gradient
    balancing, periodic evaluation, checkpointing, and early stopping.

    Args:
        model: SDKR model exposing mutable fusion weights .alpha/.beta/.gamma
            and calculate_loss(users, pos, neg) -> (loss, bpr, recon, sracl).
        data: dataset wrapper providing get_train_loader(batch_size).
        config: parsed arguments (learning_rate, weight_decay, epochs, device,
            batch_size, eval_batch_size, patience; optional alpha/beta/gamma,
            top_k, agm_config).
        logger: logging.Logger; its FileHandler (if any) decides where the
            best checkpoint is written.
    """
    top_k = _ensure_top_k(config)
    optimizer = optim.AdamW(model.parameters(), lr=config.learning_rate,
                            weight_decay=config.weight_decay)

    # Fusion weights to restore once warmup finishes.
    target_alpha = getattr(config, "alpha", 0.1)
    target_beta = getattr(config, "beta", 0.1)
    target_gamma = getattr(config, "gamma", 0.1)
    warmup_epochs = 20

    best_recall = 0.0
    patience = 0
    global_step = 0

    # Save checkpoints next to the logger's file output; fall back to "logs".
    # [Fix] os.path.dirname() returns "" for a bare filename, and
    # os.makedirs("") raises FileNotFoundError — keep the fallback in that case.
    save_dir = "logs"
    for h in logger.handlers:
        if isinstance(h, logging.FileHandler):
            save_dir = os.path.dirname(h.baseFilename) or save_dir
            break

    os.makedirs(save_dir, exist_ok=True)
    model_save_path = os.path.join(save_dir, "best_model.pth")
    logger.info(f"[Setup] Model checkpoints will be saved to: {model_save_path}")

    logger.info(f"--- Training Start (Warmup: {warmup_epochs} epochs) ---")

    for epoch in range(1, config.epochs + 1):
        # --- Dynamic weight scheduling: pure-ID backbone during warmup ---
        if epoch <= warmup_epochs:
            model.alpha = 0.0
            model.beta = 0.0
            model.gamma = 0.0
            if epoch == 1:
                logger.info("[Warmup] Fusion weights set to 0. Training Pure ID backbone.")
        elif epoch == warmup_epochs + 1:
            model.alpha = target_alpha
            model.beta = target_beta
            model.gamma = target_gamma
            logger.info(f"[Warmup Done] Fusion weights restored: a={model.alpha}, b={model.beta}, g={model.gamma}")

        model.train()
        loader = data.get_train_loader(config.batch_size)

        epoch_loss = 0.0
        metrics_accum = {'bpr': 0.0, 'recon': 0.0, 'sra': 0.0}

        for users, pos, neg in loader:
            global_step += 1
            users, pos, neg = users.to(config.device), pos.to(config.device), neg.to(config.device)

            optimizer.zero_grad()
            loss, bpr, recon, sracl = model.calculate_loss(users, pos, neg)
            loss.backward()

            # Balance ID vs. semantic gradients before the optimizer step.
            if getattr(config, "agm_config", {}).get("enable", False):
                apply_agm(model, config.agm_config, logger, global_step)

            optimizer.step()

            epoch_loss += loss.item()
            metrics_accum['bpr'] += bpr.item()
            metrics_accum['recon'] += recon.item()
            metrics_accum['sra'] += sracl.item()

        # [Fix] Guard the average against an empty loader (zero batches).
        n_batches = max(len(loader), 1)
        logger.info(f"Epoch {epoch:03d}: Loss={epoch_loss / n_batches:.4f} "
                    f"(BPR={metrics_accum['bpr'] / n_batches:.4f}, "
                    f"Recon={metrics_accum['recon'] / n_batches:.4f}, "
                    f"SRA={metrics_accum['sra'] / n_batches:.4f})")

        if epoch % 5 == 0:
            res = test(model, data, top_k, config.eval_batch_size)
            logger.info(_format_metrics(res, top_k))

            # Recall@20 drives model selection (first cutoff if 20 is absent).
            r20_idx = top_k.index(20) if 20 in top_k else 0
            curr_recall = res['recall'][r20_idx]

            if curr_recall > best_recall:
                best_recall = curr_recall
                patience = 0
                torch.save(model.state_dict(), model_save_path)
                logger.info(f"✨ New Best Recall@20: {best_recall:.4f} -> Saved")
            else:
                patience += 1
                if patience >= config.patience:
                    logger.info("Early Stopping Triggered")
                    break

    logger.info(f"Training Finished. Best Recall@20: {best_recall:.4f}")


def main():
    """Entry point: parse config, seed RNGs, set up logging, load data, train."""
    config = parse_args()
    set_seed(config.seed)

    # Fallback internal log file (run.sh usually also redirects stdout).
    # [Fix] Ensure the log directory exists before handing the path to
    # get_logger — a FileHandler on a path in a missing directory would fail.
    log_dir = "logs"
    os.makedirs(log_dir, exist_ok=True)
    log_file = os.path.join(log_dir, f"sdkr_internal_{time.strftime('%Y%m%d_%H%M%S')}.log")
    logger = get_logger(log_file)

    logger.info(f"Start training SDKR on {config.dataset}")

    data = load_data(config)
    model = SDKR(config, data, logger).to(config.device)
    train(model, data, config, logger)


if __name__ == "__main__":
    main()