"""
SASRec (Self-Attentive Sequential Recommendation) 模型的 PyTorch 实现
该模型使用 Transformer 架构来进行序列推荐
"""

import numpy as np
import torch
from routing_transformer.routing_transformer import SelfAttention, FeedForward


class PointWiseFeedForward(torch.nn.Module):
    """Position-wise feed-forward network built from kernel-size-1 convolutions.

    Two Conv1d layers (each acting as a per-position linear map) with ReLU
    and dropout in between, followed by a residual connection back to the
    input.

    Args:
        hidden_units: Width of the hidden (channel) dimension.
        dropout_rate: Dropout probability applied after each convolution.
    """

    def __init__(self, hidden_units, dropout_rate):
        super(PointWiseFeedForward, self).__init__()

        # First 1x1 convolution.
        self.conv1 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout1 = torch.nn.Dropout(p=dropout_rate)
        self.relu = torch.nn.ReLU()
        # Second 1x1 convolution.
        self.conv2 = torch.nn.Conv1d(hidden_units, hidden_units, kernel_size=1)
        self.dropout2 = torch.nn.Dropout(p=dropout_rate)

    def forward(self, inputs):
        """Apply the feed-forward block with a residual connection.

        Args:
            inputs: Tensor of shape (N, L, C).

        Returns:
            Tensor of the same shape as ``inputs``.
        """
        # Conv1d wants channels-first: (N, L, C) -> (N, C, L).
        hidden = self.conv1(inputs.transpose(-1, -2))
        hidden = self.relu(self.dropout1(hidden))
        hidden = self.dropout2(self.conv2(hidden))
        # Back to (N, L, C), then add the residual connection.
        return hidden.transpose(-1, -2) + inputs


class SASRec(torch.nn.Module):
    """SASRec (Self-Attentive Sequential Recommendation) model.

    Encodes a user's interaction history with item + positional embeddings
    followed by a stack of Routing Transformer attention blocks, and scores
    candidate items by dot product with the sequence features.

    Args:
        user_num: Number of users.
        item_num: Number of items (item ids are 1..item_num; 0 is padding).
        args: Configuration object; must provide ``device``, ``hidden_units``,
            ``maxlen``, ``dropout_rate``, ``num_blocks`` and ``num_heads``.
    """

    def __init__(self, user_num, item_num, args):
        super(SASRec, self).__init__()

        self.user_num = user_num
        self.item_num = item_num
        self.dev = args.device

        # Item embedding table; padding_idx=0 keeps the pad embedding frozen at zero.
        self.item_emb = torch.nn.Embedding(
            self.item_num + 1, args.hidden_units, padding_idx=0
        )
        # Learned positional embedding; position 0 is reserved for padding.
        self.pos_emb = torch.nn.Embedding(
            args.maxlen + 1, args.hidden_units, padding_idx=0
        )
        self.emb_dropout = torch.nn.Dropout(p=args.dropout_rate)

        # Per-block layer norms for the attention sublayers.
        self.attention_layernorms = torch.nn.ModuleList()
        # Self-attention sublayers.
        self.attention_layers = torch.nn.ModuleList()
        # Per-block layer norms for the feed-forward sublayers.
        self.forward_layernorms = torch.nn.ModuleList()
        # Feed-forward sublayers.
        self.forward_layers = torch.nn.ModuleList()

        # Final layer norm applied after the last block.
        self.last_layernorm = torch.nn.LayerNorm(args.hidden_units, eps=1e-8)

        # Build the Transformer blocks.
        for _ in range(args.num_blocks):
            self.attention_layernorms.append(
                torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            )

            # Routing Transformer self-attention in place of the vanilla
            # torch.nn.MultiheadAttention used by the original SASRec.
            new_attn_layer = SelfAttention(
                dim=args.hidden_units,
                depth=args.num_blocks,
                max_seq_len=args.maxlen,
                heads=args.num_heads,
                local_attn_heads=args.num_heads // 2,  # half the heads do local attention
                window_size=25,  # chosen so it divides maxlen evenly -- TODO confirm for other maxlen values
                dim_head=args.hidden_units // args.num_heads,
                local_attn_window_size=50,
                local_attn_radius_blocks=1,
                causal=True,  # SASRec must not attend to future positions
                attn_dropout=args.dropout_rate,
                dropout=args.dropout_rate,
                kmeans_ema_decay=0.999,
                commitment_factor=1e-4,
                rel_pos_emb=True,
            )
            self.attention_layers.append(new_attn_layer)

            self.forward_layernorms.append(
                torch.nn.LayerNorm(args.hidden_units, eps=1e-8)
            )
            # Routing Transformer feed-forward in place of PointWiseFeedForward.
            self.forward_layers.append(
                FeedForward(
                    dim=args.hidden_units,
                    mult=4,
                    dropout=args.dropout_rate,
                    activation=None,  # package default activation (presumably GELU) -- verify
                    glu=False,
                )
            )

    def log2feats(self, log_seqs):
        """Encode an interaction sequence into per-position features.

        Args:
            log_seqs: (batch, maxlen) numpy array of item ids, 0-padded.

        Returns:
            log_feats: (batch, maxlen, hidden_units) feature tensor.
        """
        # Item embeddings, scaled by sqrt(d) as in the Transformer convention.
        seqs = self.item_emb(torch.LongTensor(log_seqs).to(self.dev))
        seqs *= self.item_emb.embedding_dim**0.5

        # Positional ids 1..L, zeroed at padding so pos_emb's padding_idx
        # maps those positions to the zero vector.
        poss = np.tile(np.arange(1, log_seqs.shape[1] + 1), [log_seqs.shape[0], 1])
        poss *= log_seqs != 0
        seqs += self.pos_emb(torch.LongTensor(poss).to(self.dev))
        seqs = self.emb_dropout(seqs)

        # Valid-position mask (True where a real item is present), built once
        # outside the block loop since it never changes per block. Causality
        # is handled inside SelfAttention via causal=True, so no explicit
        # triangular mask is needed here.
        input_mask = torch.as_tensor(log_seqs != 0, device=self.dev)

        for i in range(len(self.attention_layers)):
            Q = self.attention_layernorms[i](seqs)
            # SelfAttention returns (output, aux_loss); keep only the output.
            mha_outputs = self.attention_layers[i](
                Q,
                context=None,
                input_mask=input_mask,
                pos_emb=None,  # positional information was already added to the input
            )[0]
            seqs = Q + mha_outputs  # residual connection

            # Feed-forward sublayer.
            seqs = self.forward_layernorms[i](seqs)
            seqs = self.forward_layers[i](seqs) + seqs  # residual connection

        log_feats = self.last_layernorm(seqs)

        return log_feats

    def forward(self, user_ids, log_seqs, pos_seqs, neg_seqs):
        """Training forward pass.

        Args:
            user_ids: User ids (unused by the model; kept for interface parity).
            log_seqs: (batch, maxlen) interaction sequences.
            pos_seqs: (batch, maxlen) positive (next-item) targets.
            neg_seqs: (batch, maxlen) sampled negative targets.

        Returns:
            pos_logits: Scores for the positive items, shape (batch, maxlen).
            neg_logits: Scores for the negative items, shape (batch, maxlen).
        """
        log_feats = self.log2feats(log_seqs)

        pos_embs = self.item_emb(torch.LongTensor(pos_seqs).to(self.dev))
        neg_embs = self.item_emb(torch.LongTensor(neg_seqs).to(self.dev))

        # Dot-product score between each position's feature and its target item.
        pos_logits = (log_feats * pos_embs).sum(dim=-1)
        neg_logits = (log_feats * neg_embs).sum(dim=-1)

        return pos_logits, neg_logits

    def predict(self, user_ids, log_seqs, item_indices):
        """Inference-time scoring of candidate items.

        Args:
            user_ids: User ids (unused by the model; kept for interface parity).
            log_seqs: (batch, maxlen) interaction sequences.
            item_indices: Candidate item ids to score.

        Returns:
            logits: Score per candidate item.
        """
        log_feats = self.log2feats(log_seqs)

        # Only the feature at the last position summarizes the whole sequence.
        final_feat = log_feats[:, -1, :]

        # Candidate item embeddings.
        item_embs = self.item_emb(torch.LongTensor(item_indices).to(self.dev))

        # Dot-product scores against the final sequence feature.
        logits = item_embs.matmul(final_feat.unsqueeze(-1)).squeeze(-1)

        return logits
