# trainer/trainer.py
import ast
import collections
import logging
import os
from time import time

import numpy as np
import torch
import torch.optim as optim
from torch.nn.utils.clip_grad import clip_grad_norm_
from tqdm import tqdm

from utils.helpers import save_model
from utils.metrics import calc_metrics_at_k


class Trainer:
    """
    Orchestrates the full training, evaluation, and checkpointing flow.

    Responsibilities:
    - Iterates a PyTorch DataLoader for CF training, drawing negatives via
      efficient in-batch uniform sampling.
    - Runs periodic full-ranking evaluation on the test set.
    - Tracks the best validation metric, saves the best checkpoint, and
      applies early stopping.
    """

    def __init__(self, args, model, data, device):
        """
        Args:
            args: config namespace; must provide ``lr``, ``stopping_steps``,
                ``valid_metric``, ``save_dir``, ``n_epoch``, ``evaluate_every``,
                ``Ks`` (string form of a list, e.g. ``"[20, 40]"``),
                ``cf_batch_size`` and ``clip_grad_norm``.
            model: model exposing ``calculate_loss(users, pos, neg)`` and
                ``predict(users, items)``.
            data: dataset wrapper exposing ``train_loader``, ``n_items``,
                ``train_user_dict`` and ``test_user_dict``.
            device: torch device on which batch tensors are placed.
        """
        self.args = args
        self.model = model
        self.data = data
        self.device = device
        self.logger = logging.getLogger()
        self.optimizer = optim.Adam(model.parameters(), lr=args.lr)
        self.stopping_steps = args.stopping_steps
        self.valid_metric = args.valid_metric
        # Best-so-far state for checkpointing / early stopping.
        self.best_metric_value = -np.inf
        self.best_epoch = -1
        self.cur_stopping_steps = 0
        self.save_dir = args.save_dir
        os.makedirs(self.save_dir, exist_ok=True)

    def _train_epoch(self, epoch_idx):
        """Run one training epoch.

        Returns:
            Tuple of per-batch mean losses ``(total, cf, l2, cl)``.
        """
        self.model.train()
        total_loss, total_cf, total_l2, total_cl = 0.0, 0.0, 0.0, 0.0

        # Iterate the PyTorch DataLoader directly, with a progress bar.
        iter_data = tqdm(
            self.data.train_loader,
            total=len(self.data.train_loader),
            ncols=120,
            desc=f"Train Epoch {epoch_idx:03d}"
        )

        for users, pos_items in iter_data:
            # Move the batch to the target device.
            users = users.to(self.device)
            pos_items = pos_items.to(self.device)

            # Efficient in-batch negative sampling: one uniform random item
            # per user. NOTE: this can produce "false negatives" (items the
            # user actually likes but which are absent from the training
            # set); acceptable when efficiency is the priority — a more
            # sophisticated sampler can be added later.
            neg_items = torch.randint(0, self.data.n_items, (len(users),), device=self.device)

            self.optimizer.zero_grad()

            loss, cf_loss, l2_loss, cl_loss = self.model.calculate_loss(users, pos_items, neg_items)

            loss.backward()
            if self.args.clip_grad_norm > 0:
                clip_grad_norm_(self.model.parameters(), self.args.clip_grad_norm)
            self.optimizer.step()

            total_loss += loss.item()
            total_cf += cf_loss.item()
            total_l2 += l2_loss.item()
            total_cl += cl_loss.item()

            iter_data.set_postfix({
                "Loss": f"{loss.item():.4f}", "CF": f"{cf_loss.item():.4f}", "CL": f"{cl_loss.item():.4f}"
            })

        # Guard against an empty loader so we never divide by zero.
        n_batch = max(len(self.data.train_loader), 1)
        return total_loss / n_batch, total_cf / n_batch, total_l2 / n_batch, total_cl / n_batch

    @torch.no_grad()
    def evaluate(self):
        """Full-ranking evaluation on the test set.

        Returns:
            Dict mapping metric name (e.g. ``"recall@20"``) to its mean over
            all test users.
        """
        self.model.eval()

        # Ks arrives as the string form of a list, e.g. "[20, 40]".
        # literal_eval parses it safely; eval() would execute arbitrary
        # code embedded in the config.
        Ks = ast.literal_eval(self.args.Ks)
        all_user_ids = list(self.data.test_user_dict.keys())
        n_test_users = len(all_user_ids)

        all_metrics = collections.defaultdict(list)

        # Larger batches are fine at eval time (no gradients kept).
        batch_size = self.args.cf_batch_size * 4
        n_user_batches = (n_test_users + batch_size - 1) // batch_size

        for i in tqdm(range(n_user_batches), desc="Evaluating", ncols=100):
            start = i * batch_size
            end = min((i + 1) * batch_size, n_test_users)

            batch_user_ids = all_user_ids[start:end]
            batch_user_tensor = torch.LongTensor(batch_user_ids).to(self.device)

            # Score every item for this batch of users (full ranking).
            batch_scores = self.model.predict(batch_user_tensor, None)

            # Mask items seen during training so they cannot be recommended.
            for j, user_id in enumerate(batch_user_ids):
                if user_id in self.data.train_user_dict:
                    train_items = self.data.train_user_dict[user_id]
                    batch_scores[j][train_items] = -np.inf

            batch_metrics = calc_metrics_at_k(
                batch_scores.cpu().numpy(),
                self.data.test_user_dict,
                batch_user_ids,
                Ks
            )

            for metric, values in batch_metrics.items():
                all_metrics[metric].extend(values)

        final_metrics = {metric: np.mean(values) for metric, values in all_metrics.items()}
        return final_metrics

    def train(self):
        """Full training loop with periodic evaluation and early stopping."""
        for epoch in range(self.args.n_epoch):
            t1 = time()
            avg_loss, avg_cf, avg_l2, avg_cl = self._train_epoch(epoch)
            t2 = time()
            self.logger.info(
                f"Epoch {epoch:03d} [{t2 - t1:.1f}s]: Total Loss={avg_loss:.4f} | CF={avg_cf:.4f}, L2={avg_l2:.4f}, CL={avg_cl:.4f}"
            )

            if (epoch + 1) % self.args.evaluate_every == 0:
                t3 = time()
                metrics = self.evaluate()
                t4 = time()

                metrics_str = ", ".join([f"{k}={v:.4f}" for k, v in metrics.items()])
                self.logger.info(f"Evaluation [{t4 - t3:.1f}s]: {metrics_str}")

                current_metric_value = metrics.get(self.valid_metric, -np.inf)
                if current_metric_value > self.best_metric_value:
                    # New best: reset the patience counter, save checkpoint
                    # (save_model removes the previous best's file).
                    self.best_metric_value = current_metric_value
                    self.cur_stopping_steps = 0
                    old_best_epoch = self.best_epoch
                    self.best_epoch = epoch
                    save_model(self.model, self.save_dir, epoch, old_best_epoch)
                    self.logger.info(
                        f"✨ 新的最佳模型已保存! Epoch: {epoch}, {self.valid_metric}: {current_metric_value:.4f}")
                else:
                    # No improvement: bump the patience counter and stop
                    # once it reaches the configured limit.
                    self.cur_stopping_steps += 1
                    self.logger.info(f"性能未提升，已连续 {self.cur_stopping_steps} 个评估周期。")
                    if self.cur_stopping_steps >= self.stopping_steps:
                        self.logger.info(f"在 epoch {epoch} 触发早停。最佳epoch为 {self.best_epoch}。")
                        break

        self.logger.info(
            f"训练结束。最佳epoch: {self.best_epoch}, 最佳 {self.valid_metric}: {self.best_metric_value:.4f}")