# ====================================================================================
# File: src/modules.py
# Description: [V_Final] Fix the GMAE loss function to MSE; improve numerical stability
# ====================================================================================

import random
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch_scatter import scatter_add


class LightGCNConv(nn.Module):
    """Single LightGCN propagation layer.

    Parameter-free message passing: each target node receives the weighted
    sum of its source-node embeddings (no learnable transform).
    """

    def __init__(self):
        super(LightGCNConv, self).__init__()

    def forward(self, x_src, edge_index, edge_weight, target_node_count):
        """Aggregate source embeddings into target nodes.

        Args:
            x_src: (num_src, dim) source-node embeddings.
            edge_index: (2, E) long tensor of [target_row, source_col] pairs.
            edge_weight: (E,) per-edge weights.
            target_node_count: number of target nodes (rows of the output).

        Returns:
            (target_node_count, dim) tensor of aggregated embeddings.
        """
        row, col = edge_index
        # Index-clamping guard against malformed edges.
        # NOTE(review): clamping redirects out-of-range edges onto the
        # boundary node instead of dropping them — confirm this is intended.
        col = torch.clamp(col, 0, x_src.size(0) - 1)
        row = torch.clamp(row, 0, target_node_count - 1)

        # Weighted messages from source nodes.
        msg = x_src[col] * edge_weight.unsqueeze(1)
        # [fix] Use torch-native index_add_ instead of third-party
        # torch_scatter.scatter_add — numerically equivalent sum-scatter,
        # and removes the extra dependency from this layer.
        out = x_src.new_zeros(target_node_count, x_src.size(1))
        out.index_add_(0, row, msg)
        return out


class BaseGNN(nn.Module):
    """Multi-layer LightGCN backbone.

    Propagates the input embeddings through ``n_layers`` parameter-free
    convolutions and returns the mean over all propagation depths
    (the layer-0 input included).
    """

    def __init__(self, n_layers, edge_index, edge_weight, num_nodes):
        super(BaseGNN, self).__init__()
        self.n_layers = n_layers
        self.num_nodes = num_nodes
        # Buffers so the (non-trainable) graph tensors follow the module
        # across .to(device) / .cuda() calls.
        self.register_buffer("edge_index", edge_index)
        self.register_buffer("edge_weight", edge_weight)
        self.layers = nn.ModuleList(LightGCNConv() for _ in range(n_layers))

    def forward(self, x):
        """Return the depth-averaged node embeddings for input features x."""
        all_depths = [x]
        current = x
        for conv in self.layers:
            current = conv(current, self.edge_index, self.edge_weight, self.num_nodes)
            all_depths.append(current)
        # Average over the (n_layers + 1) depth-wise embeddings.
        return torch.stack(all_depths).mean(dim=0)


# --- GMAE Components ---
class GMAE_Encoder(nn.Module):
    """GMAE encoder: a linear projection followed by stacked parameter-free
    LightGCN propagation layers."""

    def __init__(self, in_dim, out_dim, n_layers):
        super().__init__()
        self.proj = nn.Linear(in_dim, out_dim)
        nn.init.xavier_uniform_(self.proj.weight)
        self.layers = nn.ModuleList(LightGCNConv() for _ in range(n_layers))

    def forward(self, x, edge_index, edge_weight, target_nodes):
        """Project x to the embedding space, then propagate over the graph."""
        out = self.proj(x)
        for conv in self.layers:
            out = conv(out, edge_index, edge_weight, target_nodes)
        return out


class GMAE(nn.Module):
    """Graph Masked Auto-Encoder over a user–entity bipartite graph.

    One training step: mask a random fraction of the user features, encode
    users and entities with LightGCN-style propagation, decode the user
    embeddings back to feature space, and score the reconstruction of the
    masked positions with an MSE loss.
    """

    def __init__(self, gcn_layers, mlp_layers, n_features, n_nodes, n_entities, emb_dim,
                 gcn_edge_index_uk, gcn_edge_weight_uk, gcn_edge_index_ku, gcn_edge_weight_ku,
                 temperature, mask_min, mask_max):
        super().__init__()
        self.n_nodes = n_nodes
        self.n_entities = n_entities
        self.n_features = n_features
        self.mask_min = mask_min  # lower bound of the per-call mask ratio
        self.mask_max = mask_max  # upper bound of the per-call mask ratio

        # [fix] Register the graph tensors as buffers so they follow the
        # module across .to(device) calls — consistent with BaseGNN.
        # persistent=False keeps them out of the state_dict, so existing
        # checkpoints still load unchanged.
        self.register_buffer("uk_index", gcn_edge_index_uk, persistent=False)
        self.register_buffer("uk_weight", gcn_edge_weight_uk, persistent=False)
        self.register_buffer("ku_index", gcn_edge_index_ku, persistent=False)
        self.register_buffer("ku_weight", gcn_edge_weight_ku, persistent=False)

        self.user_encoder = GMAE_Encoder(emb_dim, emb_dim, gcn_layers)
        self.entity_encoder = GMAE_Encoder(emb_dim, emb_dim, gcn_layers)
        self.feat_proj = nn.Linear(n_features, emb_dim)

        # Decoder: map user embeddings back to the raw feature space.
        self.decoder = nn.Sequential(
            nn.Linear(emb_dim, emb_dim * 2),
            nn.ReLU(),
            nn.Linear(emb_dim * 2, n_features)
        )

    def get_gnn_embeddings(self, x_u_feat, x_e_emb):
        """Propagate across the bipartite graph in both directions.

        Returns:
            z_u: (n_nodes, emb_dim) user embeddings (entity -> user pass).
            z_e: (n_entities, emb_dim) entity embeddings (user -> entity pass).
        """
        # Entity -> User
        z_u = self.user_encoder(x_e_emb, self.ku_index, self.ku_weight, self.n_nodes)
        # User -> Entity
        u_init = self.feat_proj(x_u_feat)
        z_e = self.entity_encoder(u_init, self.uk_index, self.uk_weight, self.n_entities)
        return z_u, z_e

    def masking(self, x):
        """Randomly zero out a fraction of the entries of x.

        The mask ratio is drawn uniformly from [mask_min, mask_max] on each
        call, then each entry is masked independently with that probability.

        Returns:
            (x_masked, mask): a masked copy of x and the boolean mask of the
            zeroed positions.
        """
        ratio = random.uniform(self.mask_min, self.mask_max)
        # [fix] Sample the mask directly on x's device instead of building a
        # CPU tensor and transferring it afterwards.
        mask = torch.rand(x.size(), device=x.device) < ratio
        x_masked = x.clone()
        x_masked[mask] = 0.0
        return x_masked, mask

    def forward(self, x_u_feat, x_e_emb):
        """Run one masked-reconstruction step.

        Returns:
            loss: MSE reconstruction loss over the masked feature entries.
            z_u_masked: user embeddings computed from the masked features.
            z_e_all: entity embeddings.
        """
        # 1. Mask a random subset of user-feature entries.
        x_masked, mask_indices = self.masking(x_u_feat)

        # 2. Encode with the masked features.
        z_u_masked, z_e_all = self.get_gnn_embeddings(x_masked, x_e_emb)

        # 3. Decode back to the raw feature space.
        recon_feat = self.decoder(z_u_masked)

        # 4. MSE on the masked positions only — numerically stabler than BCE
        #    and appropriate for a real-valued feature-reconstruction target.
        if mask_indices.any():
            loss = F.mse_loss(recon_feat[mask_indices], x_u_feat[mask_indices])
        else:
            # [fix] Degenerate draw with no masked entries: F.mse_loss on
            # empty tensors yields NaN. Return a zero loss that stays
            # attached to the autograd graph instead.
            loss = recon_feat.sum() * 0.0

        return loss, z_u_masked, z_e_all


class BPRLoss(nn.Module):
    """Bayesian Personalized Ranking loss.

    Computes softplus(-(pos_score - neg_score)) averaged over the batch,
    which equals -log(sigmoid(pos_score - neg_score)).
    """

    def __init__(self):
        super().__init__()

    def forward(self, u_emb, i_emb, users, pos, neg):
        """Score a batch of (user, positive item, negative item) triples."""
        batch_users = u_emb[users]
        pos_items = i_emb[pos]
        neg_items = i_emb[neg]
        # Dot-product scores; the loss depends only on their difference.
        score_diff = (batch_users * pos_items).sum(1) - (batch_users * neg_items).sum(1)
        return F.softplus(-score_diff).mean()