# ====================================================================================
# 文件: src/model.py
# 描述: [V5 修复版] 增加 MLP Projector 解决空间错位；回归稳健逻辑
# ====================================================================================

import torch
import torch.nn as nn
import torch.nn.functional as F
from modules import BaseGNN, GMAE, BPRLoss
from utils import info_nce_loss


class SDKR(nn.Module):
    """Semantic-Dual Knowledge Recommender (V5, projector-fixed variant).

    Fuses an ID-based user-item backbone with two semantic item views
    (a KG view and an item-item "CoLaKG" view) and an optional GMAE-based
    user view.  MLP projectors map the semantic / GMAE spaces into the ID
    embedding space before fusion, addressing the spatial-misalignment
    issue.  The fusion weights ``alpha``/``beta``/``gamma`` are plain
    attributes so the training loop (main.py warmup) can overwrite them.
    """

    def __init__(self, config, data, logger=None):
        super().__init__()
        self.config = config
        self.device = config.device
        self.n_users = data.n_users
        self.n_items = data.n_items
        self.n_entities = data.n_entities
        self.dim = config.embedding_dim
        self.n_layers = config.n_layers

        # Initial fusion weights — the training script may overwrite these
        # attributes directly (warmup schedule lives outside the model).
        self.alpha = getattr(config, "alpha", 0.1)
        self.beta = getattr(config, "beta", 0.1)
        self.gamma = getattr(config, "gamma", 0.1)

        # Per-term loss weights.
        self.bpr_loss_weight = getattr(config, "bpr_loss_weight", 1.0)
        self.recon_loss_weight = getattr(config, "recon_loss_weight", 0.1)

        # SRA-CL (contrastive view-alignment) configuration.
        self.sracl_config = getattr(config, "sracl_config", {"enable": False})
        self.sracl_enable = self.sracl_config.get("enable", False)
        self.sracl_weight = self.sracl_config.get("weight", 0.1)
        self.sracl_temp = self.sracl_config.get("temperature", 0.1)

        # 1. ID embeddings (main backbone), Xavier-initialised in a fixed
        #    order so seeded runs stay reproducible.
        self.user_embedding = nn.Embedding(self.n_users, self.dim)
        self.item_embedding = nn.Embedding(self.n_items, self.dim)
        self.entity_embedding = nn.Embedding(self.n_entities, self.dim)
        for table in (self.user_embedding, self.item_embedding, self.entity_embedding):
            nn.init.xavier_uniform_(table.weight)

        # 2. GNN encoders over the user-item, item-KG and item-item graphs.
        self.ui_encoder = BaseGNN(self.n_layers, data.graph_ui_edge_index, data.graph_ui_edge_weight,
                                  self.n_users + self.n_items)
        self.ik_encoder = BaseGNN(self.n_layers, data.graph_ik_edge_index, data.graph_ik_edge_weight, self.n_entities)
        self.ii_encoder = BaseGNN(self.n_layers, data.graph_ii_edge_index, data.graph_ii_edge_weight, self.n_items)

        # 3. Alignment projectors: map semantic-space vectors into the ID
        #    space (Tanh helps match the value distributions).
        self.item_sem_projector = self._build_projector()
        self.user_gmae_projector = self._build_projector()

        # 4. GMAE user-reconstruction module; only built when the
        #    user-keyword graph actually has edges.
        recon_cfg = config.user_recon
        self.gmae = None
        if data.graph_uk_edge_index.numel() > 0:
            self.gmae = GMAE(
                gcn_layers=recon_cfg['encoder_layers'],
                mlp_layers=recon_cfg['decoder_layers'],
                n_features=data.user_interest_features.size(1),
                n_nodes=self.n_users,
                n_entities=self.n_entities,
                emb_dim=self.dim,
                gcn_edge_index_uk=data.graph_uk_edge_index,
                gcn_edge_weight_uk=data.graph_uk_edge_weight,
                gcn_edge_index_ku=data.graph_ku_edge_index,
                gcn_edge_weight_ku=data.graph_ku_edge_weight,
                temperature=recon_cfg['temperature'],
                mask_min=recon_cfg['mask_min_rate'],
                mask_max=recon_cfg['mask_max_rate']
            )
            self.register_buffer("user_interest_features", data.user_interest_features.float())

        self.bpr_loss_fn = BPRLoss()

    def _build_projector(self):
        """Return a one-layer MLP that maps a semantic vector to the ID space."""
        return nn.Sequential(
            nn.Linear(self.dim, self.dim),
            nn.Tanh()
        )

    def _encode_ui(self):
        """Propagate the stacked user+item table and split it back apart."""
        stacked = torch.cat([self.user_embedding.weight, self.item_embedding.weight], dim=0)
        propagated = self.ui_encoder(stacked)
        return propagated[:self.n_users], propagated[self.n_users:]

    def forward(self):
        """Compute fused embeddings plus the GMAE reconstruction loss.

        Returns:
            Tuple ``(u_final, i_final, recon_loss, i_ui, i_ii)`` where the
            last two are the un-fused ID view and the projected semantic
            view of the items, consumed by the SRA-CL objective.
        """
        # ID (collaborative) view.
        u_id, i_id = self._encode_ui()

        # Semantic views: KG entities restricted to the item rows, and the
        # CoLaKG item-item view projected into the ID space (key fix).
        i_kg = self.ik_encoder(self.entity_embedding.weight)[:self.n_items]
        i_sem = self.item_sem_projector(self.ii_encoder(self.item_embedding.weight))

        # User GMAE view; stays at zero when the module is absent.
        recon_loss = torch.tensor(0.0, device=self.device)
        u_gmae = torch.zeros_like(u_id)
        if self.gmae is not None:
            if self.training:
                recon_loss, raw_u, _ = self.gmae(self.user_interest_features,
                                                 self.entity_embedding.weight)
            else:
                raw_u, _ = self.gmae.get_gnn_embeddings(self.user_interest_features,
                                                        self.entity_embedding.weight)
            # Project the GMAE output into the ID space before fusion.
            u_gmae = self.user_gmae_projector(raw_u)

        # Plain weighted fusion — weights are driven by the training loop;
        # simple addition proved more robust than early attention fusion.
        u_final = u_id + self.alpha * u_gmae
        i_final = i_id + self.beta * i_kg + self.gamma * i_sem

        return u_final, i_final, recon_loss, i_id, i_sem

    def calculate_loss(self, users, pos_items, neg_items):
        """Return (total, bpr, recon, sracl) losses for one training batch."""
        u_final, i_final, recon_loss, i_id_view, i_sem_view = self.forward()

        # Main ranking objective.
        bpr_loss = self.bpr_loss_fn(u_final, i_final, users, pos_items, neg_items)

        # Auxiliary SRA-CL: align the ID and semantic item views.
        # NOTE(review): normalisation is presumably done inside
        # info_nce_loss — confirm against utils.
        sracl_loss = torch.tensor(0.0, device=self.device)
        if self.sracl_enable:
            sracl_loss = info_nce_loss(i_id_view[pos_items],
                                       i_sem_view[pos_items],
                                       self.sracl_temp)

        total_loss = (self.bpr_loss_weight * bpr_loss
                      + self.recon_loss_weight * recon_loss
                      + self.sracl_weight * sracl_loss)

        return total_loss, bpr_loss, recon_loss, sracl_loss

    @torch.no_grad()
    def predict(self, users):
        """Score every item for the given user indices (users x items)."""
        u_final, i_final, _, _, _ = self.forward()
        return u_final[users] @ i_final.t()