# model.py
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from const import *
import numpy as np
from torch_geometric.nn import GATConv
import os
os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
from torch_geometric.data import Data

def init_weights(m):
    # NOTE(review): this definition is shadowed by an identical redefinition of
    # `init_weights` later in this module; this first copy is dead code and one
    # of the two should be removed.
    if isinstance(m, nn.Linear):
        nn.init.xavier_normal_(m.weight)  # Xavier (Glorot) normal initialization
        if m.bias is not None:
            nn.init.zeros_(m.bias)  # zero-initialize the bias

class CustomDataset(Dataset):
    """Rating samples keyed by (user, item).

    `history` and `neighbor_emb` are looked up by user id at access time, so
    every sample carries the full side information of its user alongside the
    (user_id, item_id) pair and its label.
    """

    def __init__(self, user_ids, item_ids, labels, history, neighbor_emb):
        self.user_ids = user_ids
        self.item_ids = item_ids
        self.labels = labels
        self.history = history            # per-user interaction history
        self.neighbor_emb = neighbor_emb  # per-user neighbor embeddings

    def __len__(self):
        # One sample per label.
        return len(self.labels)

    def __getitem__(self, idx):
        uid = self.user_ids[idx]
        iid = self.item_ids[idx]
        # User-level side information is indexed by user id, not by sample index.
        sample = (uid, iid, self.history[uid], self.neighbor_emb[uid])
        return sample, self.labels[idx]

def init_weights(m):
    """Custom weight init for `nn.Module.apply`: Xavier-normal weights and
    zero biases on every ``nn.Linear``; other module types are left untouched."""
    if not isinstance(m, nn.Linear):
        return
    nn.init.xavier_normal_(m.weight)
    if m.bias is not None:
        nn.init.zeros_(m.bias)

class GraphRecommendationModel(nn.Module):
    """User-item rating model.

    Pipeline: user/item embedding tables -> two GAT layers over a user-item
    graph -> fusion with precomputed per-user neighbor embeddings -> 3-layer
    MLP -> sigmoid rescaled to a rating in [1, 5].
    """

    def __init__(self, num_users, num_items, hidden_dim, num_heads=4, dropout=0.2):
        super(GraphRecommendationModel, self).__init__()

        # Embedding tables for users and items.
        self.user_embedding = nn.Embedding(num_users, hidden_dim)
        self.item_embedding = nn.Embedding(num_items, hidden_dim)

        nn.init.xavier_normal_(self.user_embedding.weight)  # Xavier (Glorot) init
        nn.init.xavier_normal_(self.item_embedding.weight)  # Xavier (Glorot) init

        # GAT layers:
        # layer 1: num_heads attention heads, concat=True so the output
        # dimension is hidden_dim * num_heads.
        self.gat1 = GATConv(hidden_dim, hidden_dim, heads=num_heads, dropout=dropout, concat=True)
        # layer 2: single head, concat=False, projecting back to hidden_dim.
        self.gat2 = GATConv(hidden_dim * num_heads, hidden_dim, heads=1, dropout=dropout, concat=False)

        # MLP scoring head.
        self.fc1 = nn.Linear(hidden_dim, hidden_dim)
        self.fc2 = nn.Linear(hidden_dim, hidden_dim)
        self.fc3 = nn.Linear(hidden_dim, hidden_dim)
        self.output_layer = nn.Linear(hidden_dim, 1)

        # Dropout between MLP layers.
        self.dropout = nn.Dropout(dropout)

        # Apply the module-level init_weights to every Linear layer.
        self.apply(init_weights)

    def forward(self, user_ids, item_ids, history=None, user_neighbor_emb=None, flag=1):
        """Score each (user, item) pair; returns ratings scaled to [1, 5].

        NOTE(review): despite the `None` defaults, `user_neighbor_emb` (and,
        when flag != 0, `history`) must be provided — `.dim()` is called on
        user_neighbor_emb unconditionally, so passing None raises.
        """
        # Move the id tensors onto the same device as the embedding tables.
        device = self.user_embedding.weight.device
        user_ids = user_ids.to(device)
        item_ids = item_ids.to(device)
        num_items = item_ids.shape[0]
        if user_neighbor_emb.dim() == 4:
            # Assumed to carry a leading batch dimension; take the first sample.
            # NOTE(review): this silently discards all but batch element 0 — confirm intended.
            user_neighbor_emb = user_neighbor_emb[0]

        new_user_data = user_neighbor_emb[:num_items]

        # Expected shape (num_items, 100, 64) per the comments below — TODO confirm.
        if flag == 0:
            # Build a bipartite edge index from the ids themselves.
            # NOTE(review): return_inverse gives each element's index into the
            # unique array, not "first occurrence" indices as the original
            # comments claimed — verify this matches the intended graph.
            unique_user_ids, user_first_occurrence_idx = torch.unique(user_ids, return_inverse=True)
            unique_item_ids, item_first_occurrence_idx = torch.unique(item_ids, return_inverse=True)
            # Offset item node ids so they follow the user nodes.
            item_first_occurrence_idx = item_first_occurrence_idx + len(unique_user_ids)
            # Stack into a (2, E) edge index.
            edge_index = torch.stack([user_first_occurrence_idx, item_first_occurrence_idx], dim=0)
        else:
            # Default path: derive edges from the interaction history.
            edge_index = self.create_edge_index(user_ids, history)
            # Drop duplicate edges.
            edge_index = torch.unique(edge_index, dim=1)

        # NOTE(review): data.x (raw ids) is never used below — only
        # data.edge_index is read; the node features come from the embeddings.
        data = Data(x=torch.cat([user_ids.unsqueeze(-1), item_ids.unsqueeze(-1)], dim=0), edge_index=edge_index)

        # Look up user and item embeddings.
        user_emb = self.user_embedding(user_ids)
        item_emb = self.item_embedding(item_ids)

        # Node feature matrix: user nodes first, then item nodes.
        x = torch.cat([user_emb, item_emb], dim=0)

        # GAT message passing (GAT in place of GCN).
        x = self.gat1(x, data.edge_index)
        x = F.leaky_relu(x, negative_slope=0.01)  # LeakyReLU activation

        x = self.gat2(x, data.edge_index)
        x = F.leaky_relu(x, negative_slope=0.01)  # LeakyReLU activation




        # (1) Split x into the user-node half and the item-node half and average.
        # NOTE(review): this assumes len(user_ids) == num_items so the split is
        # exactly halfway — confirm batches always pair one user per item.
        x_first = x[:num_items]   # (num_items, hidden_dim)
        x_second = x[num_items:]  # (num_items, hidden_dim)
        x_avg = (x_first + x_second) / 2  # fused node representation

        # (2) Mean-pool the neighbor embeddings over their neighbor dimension.
        user_data_avg = new_user_data.mean(dim=1)  # (num_items, hidden_dim)


        # (3) Fuse the graph representation with the neighbor representation.
        result = (x_avg + user_data_avg) / 2  # (num_items, hidden_dim)

        # MLP scoring head with dropout between layers.
        combined_emb = F.relu(self.fc1(result))
        combined_emb = self.dropout(combined_emb)
        combined_emb = F.relu(self.fc2(combined_emb))
        combined_emb = self.dropout(combined_emb)
        combined_emb = F.relu(self.fc3(combined_emb))

        output = self.output_layer(combined_emb)

        # Sigmoid squashes the score into [0, 1] ...
        normalized_output = torch.sigmoid(output)
        # ... then map [0, 1] onto the rating scale [1, 5].
        scaled_output = normalized_output * 4 + 1

        return scaled_output

    def train_step(self, user_ids, item_ids, history, neighbor_emb, labels, scaler, accumulation_steps=4):
        """One forward/backward pass with mixed precision and gradient accumulation.

        `scaler` is presumably a torch.cuda.amp.GradScaler — TODO confirm.
        Returns (loss, output, param_grads), or (None, None, None) when the
        loss is NaN/Inf. The optimizer step / zero_grad is the caller's job.
        """
        # Forward pass.
        output = self(user_ids, item_ids, history, neighbor_emb)
        # RMSE loss.
        loss = torch.nn.functional.mse_loss(output, labels)
        loss = torch.sqrt(loss)
        loss = loss / accumulation_steps  # scale for gradient accumulation

        # Skip the backward pass on a non-finite loss.
        if torch.isnan(loss).any() or torch.isinf(loss).any():
            return None, None, None

        # Backward pass (mixed precision).
        scaler.scale(loss).backward()

        # Collect a snapshot of every parameter's gradient for inspection.
        param_grads = []
        for idx, param in enumerate(self.parameters()):
            if param.grad is not None:
                param_grads.append(param.grad.clone())  # clone so the caller keeps a copy
            else:
                param_grads.append(torch.zeros_like(param.data))  # zeros for grad-less params

        return loss, output, param_grads

    def create_edge_index(self, user_ids, history):
        """Build a (2, E) edge index connecting node 0 to each non-padding
        history item.

        NOTE(review): the padding heuristic below takes the LAST non-zero
        history value as the padding id and then excludes every history entry
        equal to it — this also drops legitimate items that share that id, and
        ignores zeros as padding despite filtering on them first. Verify this
        against how `history` is actually padded.
        """
        # Treat the last non-zero history entry as the padding value.
        non_zero_history = history[history != 0]
        padding_value = non_zero_history[-1] if non_zero_history.size(0) > 0 else None

        # All-zero history: no edges.
        if padding_value is None:
            return torch.zeros((2, 0), dtype=torch.long, device=user_ids.device)

        # Indices of history entries that are not the padding value.
        valid_item_indices = (history != padding_value).nonzero(as_tuple=False)[:, 0]

        # Allocate the edge index.
        edge_index = torch.zeros((2, valid_item_indices.size(0)), dtype=torch.long, device=user_ids.device)
        # Every edge starts at node 0 (the user node).
        edge_index[0] = torch.zeros(valid_item_indices.size(0), dtype=torch.long, device=user_ids.device)
        # Edges end at the history position shifted by one.
        # NOTE(review): these are positions in `history`, not item ids — confirm
        # they are valid node indices for the graph built in forward().
        edge_index[1] = valid_item_indices + 1

        return edge_index
