from math import sqrt
import torch.nn as nn
import torch
import torch.nn.functional as F
from torch.distributions import Categorical
from torch_scatter import segment_coo

class Embedding(nn.Module):
    def __init__(self,
                 vocab_size_dic,
                 id_embedding_dim=64,
                 simple_embedding_dim=8,
                 ):
        """Embed each feature column of a behavior sequence and fuse them with an MLP.

        :param vocab_size_dic: dict describing every behavior feature and its vocab size:
                               {
                                   'inherent_feature': {'item_id': vocab_num, 'author': vocab_num, ...},
                                   'cross_feature': {'play_time': vocab_num, 'date': vocab_num, ...}
                               }
        :param id_embedding_dim: embedding dim for high-cardinality ("id") features
        :param simple_embedding_dim: embedding dim for low-cardinality features
        """
        super(Embedding, self).__init__()
        self.vocab_size_dic = vocab_size_dic
        self.simple_embedding_dim = simple_embedding_dim
        self.id_embedding_dim = id_embedding_dim

        def _dim_for(name):
            # features whose name contains 'id' get the wide embedding
            return id_embedding_dim if 'id' in name else simple_embedding_dim

        spec = []  # [(vocab_num, embedding_dim), ...] in feature order
        self.h = 0  # total embedding width contributed by inherent features
        self.c = 0  # total embedding width contributed by cross features
        for feature, vocab_num in vocab_size_dic['inherent_feature'].items():
            dim = _dim_for(feature)
            spec.append((vocab_num, dim))
            self.h += dim
        for feature, vocab_num in vocab_size_dic['cross_feature'].items():
            dim = _dim_for(feature)
            spec.append((vocab_num, dim))
            self.c += dim
        self.vocab_size_list = [vocab_num for vocab_num, _ in spec]

        self.embed_layer_dic = nn.ModuleDict()
        for idx, (vocab_num, dim) in enumerate(spec):
            # id-sized layers get a descriptive key; the rest are keyed by position
            if dim == id_embedding_dim:
                key = 'item_id_embedding_{}'.format(idx)
            else:
                key = str(idx)
            self.embed_layer_dic[key] = nn.Embedding(vocab_num, dim)

        total_dim = self.h + self.c
        self.mlp = nn.Sequential(
            nn.Linear(total_dim, total_dim),
            nn.ReLU6()
        )

    def forward(self, s):
        """Look up one embedding per feature column and fuse with the MLP.

        :param s: LongTensor (batch, behaviors_num, behavior_feature_num);
                  column i holds the categorical id for the i-th feature
        :return: (batch, behaviors_num, h + c) fused embedding
        """
        pieces = []
        for col, layer in enumerate(self.embed_layer_dic.values()):
            # stop if the input carries fewer feature columns than layers
            if col >= s.shape[-1]:
                break
            ids = s.narrow(-1, start=col, length=1)
            pieces.append(layer(ids).squeeze(-2))
        return self.mlp(torch.cat(pieces, dim=-1))

class CustomGRU(nn.Module):
    """Minimal GRU cell with fused gate weights.

    Column layout along the last axis of the fused parameters is [z | r | h]:
      W: (input_size, 3 * hidden_size) input-to-hidden
      U: (hidden_size, 3 * hidden_size) hidden-to-hidden
      bias: (3 * hidden_size,)
    """

    def __init__(self, input_size, hidden_size):
        super(CustomGRU, self).__init__()
        self.hidden_size = hidden_size

        # Fused input weights [W_z, W_r, W_h]: (input_size, 3 * hidden_size)
        self.W = nn.Parameter(torch.Tensor(input_size, 3 * hidden_size))

        # Fused hidden-state weights [U_z, U_r, U_h]: (hidden_size, 3 * hidden_size)
        self.U = nn.Parameter(torch.Tensor(hidden_size, 3 * hidden_size))

        # Fused bias [b_z, b_r, b_h]: (3 * hidden_size,)
        self.bias = nn.Parameter(torch.Tensor(3 * hidden_size))

        self.reset_parameters()

    def reset_parameters(self):
        """Xavier-uniform init for the weights, zeros for the bias."""
        nn.init.xavier_uniform_(self.W)
        nn.init.xavier_uniform_(self.U)
        nn.init.zeros_(self.bias)

    def forward(self, x, h_prev):
        """Run one GRU step.

        :param x: (batch_size, input_size) input at the current time step
        :param h_prev: (batch_size, hidden_size) previous hidden state
        :return: (batch_size, hidden_size) next hidden state
        """
        two_h = 2 * self.hidden_size

        # Only the z/r columns are needed from the fused matmul: the candidate
        # columns must be recomputed with the reset gate applied to h_prev, so
        # including them here (as the original code did) was wasted work.
        gates = x @ self.W[:, :two_h] + h_prev @ self.U[:, :two_h] + self.bias[:two_h]
        z_gate, r_gate = gates.chunk(2, dim=1)

        # Update gate: how much of the candidate state to blend in.
        z_t = torch.sigmoid(z_gate)

        # Reset gate: how much of the previous state feeds the candidate.
        r_t = torch.sigmoid(r_gate)

        # Candidate hidden state (reset gate scales the history contribution).
        h_tilde = torch.tanh(x @ self.W[:, two_h:] +
                             (r_t * h_prev) @ self.U[:, two_h:] +
                             self.bias[two_h:])

        # Convex combination of old state and candidate.
        h_next = (1 - z_t) * h_prev + z_t * h_tilde

        return h_next

class CompressionPolicyNetwork_MLP(nn.Module):
    """REINFORCE policy that compresses a sequence into ``group_num`` segments.

    ``group_num - 1`` boundary positions are sampled without replacement from
    per-position logits; each resulting segment is pooled with positive
    attention weights, and the mean cosine similarity between the original
    embeddings and their segment's pooled embedding serves as the reward.
    """

    def __init__(self, embedding_dim, group_num, seq_len,gradient_gather=False,hidden_dim=10):
        # embedding_dim: feature size of each sequence position
        # group_num: target number of segments (group_num - 1 cuts are sampled)
        # seq_len: fixed input sequence length (the Linear layers mix positions)
        # gradient_gather: if True, rewards/pooled embeddings keep gradients
        super().__init__()
        # position-mixing encoder: operates along the (transposed) sequence axis
        self.encoder = nn.Sequential(
            nn.Linear(seq_len, hidden_dim, bias=False),
            nn.ReLU6(),
            nn.Linear(hidden_dim, seq_len, bias=False),
            nn.ReLU6(),
        )

        # first attention stage: position-mixing, same shape as the encoder
        self.atte_head1 = nn.Sequential(
            nn.Linear(seq_len, hidden_dim, bias=False),
            nn.ReLU6(),
            nn.Linear(hidden_dim, seq_len, bias=False),
            nn.ReLU6(),
        )
        # second attention stage: one scalar score per position
        self.atte_head2 = nn.Sequential(
            nn.Linear(embedding_dim,1,bias=False),
            nn.ReLU6()
        )
        self.group_num = group_num - 1  # number of boundaries to sample
        self.gradient_gather = gradient_gather
        self.logits_head = nn.Linear(embedding_dim, 1)

    def forward(self, embeddings):
        """Sample segment boundaries and return pooled segments plus the policy loss.

        :param embeddings: (batch_size, seq_len, embedding_dim)
        :return: (avg_emb, compress_loss) — avg_emb is
                 (batch_size, group_num, embedding_dim) pooled segment
                 embeddings; compress_loss is the scalar REINFORCE loss
        """
        device = embeddings.device
        batch_size, seq_len = embeddings.shape[:2]
        encoder_emb = self.encoder(embeddings.transpose(1,2)).transpose(1,2)  # (batch_size, seq_len, embedding_dim)
        atte_score = self.atte_head1(embeddings.transpose(1,2)).transpose(1,2)
        atte_score = self.atte_head2(atte_score)  # (batch_size, seq_len, 1)
        logits = self.logits_head(encoder_emb).squeeze(-1)  # (batch_size, seq_len)
        selected = torch.zeros((batch_size, 0), dtype=torch.long, device=device)
        log_probs = []

        # pick k positions sequentially, without replacement
        for _ in range(self.group_num):
            # rebuild the mask of already-selected positions each round
            if selected.size(1) > 0:
                # build a fresh mask with scatter (out-of-place w.r.t. logits)
                mask = torch.zeros_like(logits, dtype=torch.bool)
                src = torch.ones_like(selected, dtype=torch.bool)
                mask.scatter_(1, selected, src)
            else:
                mask = torch.zeros_like(logits, dtype=torch.bool)

            # suppress already-selected positions before sampling
            masked_logits = logits.masked_fill(mask, -1e9)
            probs = F.softmax(masked_logits, dim=-1)

            # sample one boundary position
            dist = Categorical(probs)
            action = dist.sample()
            log_probs.append(dist.log_prob(action))

            # record the choice (out-of-place concat)
            selected = torch.cat([selected, action.unsqueeze(1)], dim=1)

        # total log-probability of the sampled boundary set
        log_probs = torch.stack(log_probs, -1).sum(-1)

        if self.gradient_gather:
            rewards, avg_emb = self.compute_rewards(embeddings, selected,atte_score)
        else:
            with torch.no_grad():
                rewards, avg_emb = self.compute_rewards(embeddings, selected,atte_score)

        compress_loss = -(log_probs * rewards).mean()
        return avg_emb, compress_loss

    def compute_rewards(self,embeddings, selected_positions,atte_score):
        """Pool each segment with attention weights and score the compression.

        :param embeddings: (batch_size, seq_len, embedding_dim)
        :param selected_positions: (batch_size, k) sampled boundary indices
        :param atte_score: (batch_size, seq_len, 1) raw attention scores
        :return: (similarities, avg_emb) — per-sample mean cosine similarity
                 (the reward, shape (batch_size,)) and the
                 (batch_size, k+1, embedding_dim) pooled segment embeddings
        """
        device = embeddings.device
        batch_size, seq_len, embed_dim = embeddings.shape
        atte_score = torch.exp(atte_score)  # positive weights for softmax-style pooling
        k = selected_positions.size(1)

        # Sort positions and add boundaries
        sorted_pos, _ = torch.sort(selected_positions, dim=-1)
        boundaries = torch.cat([
            torch.zeros(batch_size, 1, device=device, dtype=torch.long),
            sorted_pos,
            torch.full((batch_size, 1), seq_len, device=device, dtype=torch.long)
        ], dim=-1)  # shape: (batch_size, k+2)

        # map every position to its segment id (0..k)
        pos_grid = torch.arange(seq_len, device=device).expand(batch_size, -1)
        segment_idx = []
        # torch.bucketize requires 1-D boundaries, hence the per-sample loop
        for i in range(batch_size):
            _segment_idx = torch.bucketize(pos_grid[i], boundaries[i], right=True) - 1
            segment_idx.append(_segment_idx.unsqueeze(-1))
        segment_idx = torch.concat(segment_idx, dim = -1).T
        # Compute average embeddings using scatter
        flat_emb = embeddings.reshape(-1, embed_dim)
        # per-batch offsets so segment ids are unique across the flattened batch
        flat_seg = segment_idx + (torch.arange(batch_size, device=device) * (k+1)).unsqueeze(-1)
        flat_seg = flat_seg.reshape(-1)

        atte_score = atte_score.reshape(-1,1)

        # normalize the attention weights within each segment
        sum_atte_score = segment_coo(atte_score, flat_seg, dim_size=batch_size*(k+1), reduce='sum').reshape(batch_size,-1,1)
        sum_atte_score = sum_atte_score.gather(1, segment_idx.unsqueeze(-1)).reshape(-1,1)
        atte_score = atte_score / sum_atte_score
        flat_emb = flat_emb * atte_score
        avg_emb = segment_coo(flat_emb, flat_seg, dim_size=batch_size*(k+1), reduce='sum')
        # counts = segment_coo(
        #     torch.ones_like(flat_seg, dtype=torch.float),
        #     flat_seg,
        #     dim_size=batch_size*(k+1),
        #     reduce='sum'
        # )
        # avg_emb = sum_emb / (counts.unsqueeze(-1) + 1e-8)
        avg_emb = avg_emb.view(batch_size, k+1, embed_dim)

        # Expand average embeddings and compute similarity
        expanded_avg = avg_emb.gather(1, segment_idx.unsqueeze(-1).expand(-1, -1, embed_dim))
        similarities = F.cosine_similarity(embeddings, expanded_avg, dim=-1).mean(dim=-1)

        return similarities, avg_emb

class CompressionPolicyNetwork_Conv(nn.Module):
    """REINFORCE segmentation policy, "conv" variant.

    Same boundary-sampling scheme as CompressionPolicyNetwork_MLP, but the
    per-position logits come from an element-wise learned gate followed by a
    causal cumulative sum, and segments are pooled by plain averaging
    (no attention weights).
    """

    def __init__(self, embedding_dim, group_num, seq_len,gradient_gather=False,hidden_dim=10):
        # NOTE(review): hidden_dim is accepted for signature parity with the
        # MLP variant but is unused here.
        super().__init__()
        # learned per-position, per-feature gate (despite the name, this is an
        # element-wise product, not a convolution)
        self.conv = nn.Parameter(torch.randn(1,seq_len, embedding_dim), requires_grad=True)
        self.group_num = group_num - 1  # number of boundaries to sample
        self.gradient_gather = gradient_gather
        self.logits_head = nn.Linear(embedding_dim, 1)

    def forward(self, embeddings):
        """Sample segment boundaries and return pooled segments plus the policy loss.

        :param embeddings: (batch_size, seq_len, embedding_dim)
        :return: (avg_emb, compress_loss) — avg_emb is
                 (batch_size, group_num, embedding_dim) mean-pooled segment
                 embeddings; compress_loss is the scalar REINFORCE loss
        """
        device = embeddings.device
        batch_size, seq_len = embeddings.shape[:2]
        # gated embeddings accumulated causally along the sequence
        hidden_state = (self.conv * embeddings)
        hidden_state = hidden_state.cumsum(dim=1)
        logits = self.logits_head(hidden_state).squeeze(-1)  # (batch_size, seq_len)
        selected = torch.zeros((batch_size, 0), dtype=torch.long, device=device)
        log_probs = []

        # pick k positions sequentially, without replacement
        for _ in range(self.group_num):
            # rebuild the mask of already-selected positions each round
            if selected.size(1) > 0:
                # build a fresh mask with scatter (out-of-place w.r.t. logits)
                mask = torch.zeros_like(logits, dtype=torch.bool)
                src = torch.ones_like(selected, dtype=torch.bool)
                mask.scatter_(1, selected, src)
            else:
                mask = torch.zeros_like(logits, dtype=torch.bool)

            # suppress already-selected positions before sampling
            masked_logits = logits.masked_fill(mask, -1e9)
            probs = F.softmax(masked_logits, dim=-1)

            # sample one boundary position
            dist = Categorical(probs)
            action = dist.sample()
            log_probs.append(dist.log_prob(action))

            # record the choice (out-of-place concat)
            selected = torch.cat([selected, action.unsqueeze(1)], dim=1)

        # total log-probability of the sampled boundary set
        log_probs = torch.stack(log_probs, -1).sum(-1)

        if self.gradient_gather:
            rewards, avg_emb = self.compute_rewards(embeddings, selected)
        else:
            with torch.no_grad():
                rewards, avg_emb = self.compute_rewards(embeddings, selected)

        compress_loss = -(log_probs * rewards).mean()
        return avg_emb, compress_loss

    def compute_rewards(self,embeddings, selected_positions):
        """Mean-pool each segment and score the compression.

        :param embeddings: (batch_size, seq_len, embedding_dim)
        :param selected_positions: (batch_size, k) sampled boundary indices
        :return: (similarities, avg_emb) — per-sample mean cosine similarity
                 (the reward, shape (batch_size,)) and the
                 (batch_size, k+1, embedding_dim) pooled segment embeddings
        """
        device = embeddings.device
        batch_size, seq_len, embed_dim = embeddings.shape
        k = selected_positions.size(1)

        # Sort positions and add boundaries
        sorted_pos, _ = torch.sort(selected_positions, dim=-1)
        boundaries = torch.cat([
            torch.zeros(batch_size, 1, device=device, dtype=torch.long),
            sorted_pos,
            torch.full((batch_size, 1), seq_len, device=device, dtype=torch.long)
        ], dim=-1)  # shape: (batch_size, k+2)

        # map every position to its segment id (0..k)
        pos_grid = torch.arange(seq_len, device=device).expand(batch_size, -1)
        segment_idx = []
        # torch.bucketize requires 1-D boundaries, hence the per-sample loop
        for i in range(batch_size):
            _segment_idx = torch.bucketize(pos_grid[i], boundaries[i], right=True) - 1
            segment_idx.append(_segment_idx.unsqueeze(-1))
        segment_idx = torch.concat(segment_idx, dim = -1).T
        # Compute average embeddings using scatter
        flat_emb = embeddings.reshape(-1, embed_dim)
        # per-batch offsets so segment ids are unique across the flattened batch
        flat_seg = segment_idx + (torch.arange(batch_size, device=device) * (k+1)).unsqueeze(-1)
        flat_seg = flat_seg.reshape(-1)

        sum_emb = segment_coo(flat_emb, flat_seg, dim_size=batch_size*(k+1), reduce='sum')
        counts = segment_coo(
            torch.ones_like(flat_seg, dtype=torch.float),
            flat_seg,
            dim_size=batch_size*(k+1),
            reduce='sum'
        )
        # epsilon guards empty segments against division by zero
        avg_emb = sum_emb / (counts.unsqueeze(-1) + 1e-8)
        avg_emb = avg_emb.view(batch_size, k+1, embed_dim)

        # Expand average embeddings and compute similarity
        expanded_avg = avg_emb.gather(1, segment_idx.unsqueeze(-1).expand(-1, -1, embed_dim))
        similarities = F.cosine_similarity(embeddings, expanded_avg, dim=-1).mean(dim=-1)

        return similarities, avg_emb


class CompressionPolicyNetwork_GRU(nn.Module):
    """REINFORCE policy that segments a sequence with a step-wise GRU.

    At each step a GRU cell reads the next embedding and a two-way head
    decides "continue current segment" (0) vs "start a new segment" (1),
    with a cap on the number of segments. Segments are mean-pooled and the
    per-position cosine similarity to the pooled representation is the reward.
    """

    def __init__(self, embedding_dim,  group_num):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.group_num = group_num - 1  # maximum number of split actions

        # process the sequence one step at a time
        self.gru_cell = nn.GRUCell(embedding_dim, embedding_dim)

        # decision head: probabilities of [continue, split]
        # NOTE(review): this already ends in Softmax, yet forward() applies
        # F.softmax again on the masked output — double softmax; confirm intended.
        self.decision_fc = nn.Sequential(
            nn.Linear(embedding_dim, 2),
            nn.Softmax(dim=-1)
        )

        # learnable initial hidden state
        self.h0 = nn.Parameter(torch.zeros(1, embedding_dim))

    def forward(self, x):
        """
        Run the segmentation policy over a batch of sequences.

        :param x: (batch_size, seq_len, embedding_dim)
        :return:
            compressed_emb: (batch_size, group_num, embedding_dim) pooled
                            segment embeddings, zero-padded on the left when
                            fewer segments were produced
            loss: scalar REINFORCE loss
        """
        batch_size, seq_len, emb_dim = x.shape
        device = x.device

        # initial hidden state and per-sample split counter
        h_t = self.h0.expand(batch_size, -1)  # (batch_size, hidden_dim)
        segment_count = torch.zeros(batch_size, dtype=torch.long, device=device)

        # per-step outputs
        all_logits = []
        all_actions = []

        for t in range(seq_len):
            # current time-step input
            x_t = x[:, t, :]  # (batch_size, emb_dim)

            # advance the GRU state
            h_t = self.gru_cell(x_t, h_t)

            # split-decision scores for this step
            logits_t = self.decision_fc(h_t)  # (batch_size, 2)

            # forbid further splits once the budget is exhausted
            # NOTE(review): this uses `self.group_num - 1` while
            # compute_segments caps at `self.group_num` — confirm which bound
            # is intended.
            mask = segment_count < self.group_num - 1
            adjusted_logits = logits_t.clone()
            adjusted_logits[~mask, 1] = -1e9  # effectively zero probability of splitting

            # sample the action
            probs = F.softmax(adjusted_logits, dim=-1)
            dist = Categorical(probs)
            action_t = dist.sample()

            # count a split only while under budget
            new_segment = (action_t == 1) & mask
            segment_count += new_segment.long()

            # reset the hidden state at split points for the next step
            reset_mask = (action_t == 1) & mask
            h_t = torch.where(reset_mask.unsqueeze(1), torch.zeros_like(h_t), h_t)

            all_logits.append(logits_t)
            all_actions.append(action_t)

        # stack the per-step results
        logits = torch.stack(all_logits, dim=1)  # (batch_size, seq_len, 2)
        actions = torch.stack(all_actions, dim=1)  # (batch_size, seq_len)

        # log-probabilities of the taken actions
        # NOTE(review): computed from the *unmasked* logits, so steps where the
        # split was forbidden still contribute — confirm this matches the
        # intended objective.
        probs = F.softmax(logits, dim=-1)
        dist = Categorical(probs)
        log_probs = dist.log_prob(actions)

        # turn actions into per-position segment ids
        segment_mask = self.compute_segments(actions, self.group_num)

        # pool each segment
        compressed_emb_with_origin_dim, compressed_emb = self.compute_compressed_embeddings(x, segment_mask)

        # reward: reconstruction similarity
        rewards = self.compute_reward(x, compressed_emb_with_origin_dim)

        # policy-gradient loss
        loss = self.reinforce_loss(log_probs, actions, rewards)
        if compressed_emb.shape[1] != self.group_num + 1:
            # Bug fix: allocate the padding on the input's device — previously
            # this defaulted to CPU and crashed for CUDA inputs.
            zeros_mat = torch.zeros(batch_size, self.group_num + 1 - compressed_emb.shape[1], emb_dim,
                                    device=device, requires_grad=True)
            compressed_emb = torch.concat([zeros_mat ,compressed_emb], dim=1)
        return compressed_emb, loss

    def compute_segments(self,actions, group_num):
        """
        Convert per-step split actions into segment ids, capped at group_num.

        :param actions: (batch_size, seq_len) split decisions (1 = new segment)
        :param group_num: maximum number of segments
        :return: (batch_size, seq_len) 1-based segment id for every position
        """
        batch_size, seq_len = actions.shape
        device = actions.device

        # segment-id matrix and per-sample segment counter
        segment_mask = torch.zeros_like(actions)
        segment_count = torch.ones(batch_size, dtype=torch.long, device=device)

        # the first position always belongs to segment 1
        segment_mask[:, 0] = 1

        # walk the sequence, opening a new segment at allowed split points
        for t in range(1, seq_len):
            # split here only if requested and still under the cap
            should_split = (actions[:, t] == 1) & (segment_count < group_num)

            # advance the counter where a split happened
            segment_count += should_split.long()

            # new id on a split, otherwise inherit the previous position's id
            segment_mask[:, t] = torch.where(
                should_split,
                segment_count,
                segment_mask[:, t - 1]
            )

        return segment_mask

    def compute_compressed_embeddings(self,x, segment_mask):
        """
        Mean-pool each segment.

        :param x: (batch_size, seq_len, embedding_dim) original sequence
        :param segment_mask: (batch_size, seq_len) segment id per position
        :return:
            compressed: (batch_size, seq_len, embedding_dim) each position's
                        pooled segment embedding
            segment_emb: (batch_size, num_segments, embedding_dim) pooled
                         embedding per segment; slot 0 stays zero because
                         segment ids are 1-based
        """
        batch_size, seq_len, emb_dim = x.shape
        device = x.device

        # largest segment id per sample
        max_segment_id = segment_mask.max(dim=1).values

        # sum and count buffers sized by the batch-wide maximum id
        segment_emb = torch.zeros(batch_size, max_segment_id.max().item() + 1, emb_dim, device=device)
        segment_count = torch.zeros(batch_size, max_segment_id.max().item() + 1, device=device)

        # scatter-add per-segment sums and element counts
        segment_emb.scatter_add_(1, segment_mask.unsqueeze(-1).expand(-1, -1, emb_dim), x)
        segment_count.scatter_add_(1, segment_mask, torch.ones_like(segment_mask, dtype=torch.float))

        # mean; the clamp avoids division by zero for empty slots
        segment_emb = segment_emb / segment_count.unsqueeze(-1).clamp(min=1)

        # broadcast each segment's mean back to its positions
        compressed = segment_emb.gather(1, segment_mask.unsqueeze(-1).expand(-1, -1, emb_dim))

        return compressed, segment_emb

    def compute_reward(self,original, compressed):
        """
        Reward: mean cosine similarity between each position and its pooled embedding.

        :param original: (batch_size, seq_len, embedding_dim)
        :param compressed: (batch_size, seq_len, embedding_dim)
        :return: (batch_size,) reward per sequence
        """
        return torch.nn.functional.cosine_similarity(original, compressed, dim=-1).mean(dim=-1)

    def reinforce_loss(self,log_probs, actions, rewards):
        """
        REINFORCE loss for the sampled action sequences.

        :param log_probs: (batch_size, seq_len) log-probability of each action
        :param actions: (batch_size, seq_len) executed actions (unused; kept
                        for interface stability)
        :param rewards: (batch_size,) reward per sequence
        :return: scalar loss
        """
        # total log-probability per sequence
        log_prob_sum = log_probs.sum(dim=1)

        # REINFORCE: -E[log_prob * reward]
        loss = - (log_prob_sum * rewards).mean()
        return loss

class Self_Attention(nn.Module):
    """Multi-head self-attention with rotary position embedding (RoPE) on Q/K.

    The Q/K/V projections are applied per head (Linear over head_dim, after
    the head split); RoPE rotates the full embedding before that split.
    """

    def __init__(self,
                 embedding_dim,
                 sequence_len,
                 heads,
                 drop_out=0.2,
                 ):
        super().__init__()
        self.head_dim = embedding_dim // heads
        self.embedding_dim = embedding_dim

        # embedding_dim must split evenly across heads
        assert heads * self.head_dim == embedding_dim
        self.w_q = nn.Linear(self.head_dim, self.head_dim)
        self.w_k = nn.Linear(self.head_dim, self.head_dim)
        self.w_v = nn.Linear(self.head_dim, self.head_dim)
        self.w_o = nn.Linear(embedding_dim, embedding_dim)
        self.drop_out = nn.Dropout(drop_out)
        self.softmax = nn.Softmax(dim=-1)
        self.heads = heads
        self.sequence_len = sequence_len
        self.sqrt_d_out = sqrt(self.head_dim)  # attention temperature

    def rope(self, x):
        """Apply rotary position embedding to x.

        Assumes x is (batch, sequence_len, embedding_dim); the
        ``x[:, seq_len:, :]`` tails below are then empty slices, so the result
        reduces to x * cos + rotate_half(x) * sin.
        NOTE(review): the cos/sin caches are rebuilt on CPU on every call and
        then moved to x's device — a candidate for precomputed buffers.
        """
        base = 10000
        d = self.embedding_dim
        inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
        seq_len = self.sequence_len
        seq_id = torch.arange(seq_len).float()
        id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
        # duplicate so the cache spans the full embedding width
        id_theta2 = torch.concat((id_theta, id_theta), dim=1)
        cos_cache = id_theta2.cos().to(x.device)
        sin_cache = id_theta2.sin().to(x.device)

        def rotate_half(_x):
            # (x1, x2) -> (-x2, x1): the 90-degree rotation used by RoPE
            x1 = _x[..., :d // 2]
            x2 = _x[..., d // 2:]
            return torch.concat([-x2, x1], dim=-1)

        rotated_x = rotate_half(x)
        return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self, input):
        """Scaled dot-product self-attention.

        :param input: (batch, sequence_len, embedding_dim)
        :return: (batch, sequence_len, embedding_dim)
        """
        b = input.shape[0]
        # RoPE on queries and keys only; values stay unrotated
        queries = self.rope(input).view(b, -1, self.heads, self.head_dim)
        keys = self.rope(input).view(b, -1, self.heads, self.head_dim)
        values = input.view(b, -1, self.heads, self.head_dim)

        queries = self.w_q(queries)
        keys = self.w_k(keys)
        values = self.w_v(values)

        # (batch, heads, q_pos, k_pos) attention energies
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        atte = self.softmax(energy / self.sqrt_d_out)
        atte = self.drop_out(atte)
        out = torch.einsum("nhql,nlhd->nqhd", [atte, values]).reshape(
            b, self.sequence_len, self.embedding_dim
        )
        return self.w_o(out)

class Cross_Attention(nn.Module):
    """Multi-head cross-attention with RoPE applied to Q and K.

    The per-head projections (Linear over head_dim) run after the head split;
    forward returns both the output and the pre-dropout attention map.
    """

    def __init__(self, 
                 embedding_dim, 
                 q_len, 
                 kv_len,
                 heads,
                 drop_out=0.2):
        super().__init__()
        assert embedding_dim % heads == 0,(
            "embedding_dim 应该能被 heads整除"
        )
        self.embedding_dim = embedding_dim
        self.heads = heads
        self.q_len = q_len
        self.kv_len = kv_len
        self.head_dim = int(embedding_dim / heads)
        self.wq = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.wk = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.wv = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.dropout = nn.Dropout(drop_out)
        self.wo = nn.Linear(embedding_dim, embedding_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.sqrt_d = sqrt(self.head_dim)  # attention temperature

    def rope(self, x, seq_len):
            """Rotary position embedding over the first ``seq_len`` positions.

            Assumes x's second dimension equals seq_len, so the
            ``x[:, seq_len:, :]`` tails are empty slices.
            NOTE(review): caches are rebuilt on CPU each call — candidate for
            precomputed buffers.
            """
            base = 10000
            d = self.embedding_dim
            inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
            seq_id = torch.arange(seq_len).float()
            id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
            # duplicate so the cache spans the full embedding width
            id_theta2 = torch.concat((id_theta, id_theta), dim=1)
            cos_cache = id_theta2.cos().to(x.device)
            sin_cache = id_theta2.sin().to(x.device)
            def rotate_half(_x):
                # (x1, x2) -> (-x2, x1): the 90-degree rotation used by RoPE
                x1 = _x[..., :d // 2]
                x2 = _x[..., d // 2:]
                return torch.concat([-x2, x1], dim=-1)

            rotated_x = rotate_half(x)
            return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                    + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self,q , kv):
        """Cross-attend q over kv.

        :param q: (batch, q_len, embedding_dim) query sequence
        :param kv: (batch, kv_len, embedding_dim) key/value sequence
        :return: (output, attention) — output is (batch, q_len, embedding_dim);
                 attention is the (batch, heads, q_len, kv_len) pre-dropout map
        """
        batch_size, seq_len , embedding_dim = kv.size()
        q = self.rope(q, self.q_len).view(batch_size, -1, self.heads, self.head_dim)
        k = self.rope(kv, self.kv_len).view(batch_size, -1, self.heads, self.head_dim)
        v = kv.view(batch_size, -1, self.heads, self.head_dim)
        query = self.wq(q)
        keys = self.wk(k)
        values = self.wv(v)

        atte = torch.einsum("bqhd,bkhd->bhqk",[query,keys])
        atte = self.softmax(atte / self.sqrt_d)
        # keep the pre-dropout attention map for the caller
        ret_atte = atte
        atte = self.dropout(atte)
        out = torch.einsum("bhqk, bkhd->bqhd",[atte,values]).reshape(
            batch_size, self.q_len, self.embedding_dim
        )
        return self.wo(out) ,ret_atte

class Diff_Attention(nn.Module):
    """Differential-attention-style cross-attention (Q/K split into two halves).

    The attention map is the difference of two softmax maps weighted by a
    fixed ``lambda_init``, then softmaxed again and RMS-normalized per head.
    NOTE(review): lambda_q1/k1/q2/k2 are declared but never used in forward —
    a learnable lambda (as in the Diff Transformer) may have been intended.
    """

    def __init__(self,
                 embedding_dim,
                 q_len,
                 kv_len,
                 heads,
                 drop_out=0.2
                 ):
        super().__init__()
        self.head_dim = embedding_dim // heads
        self.embedding_dim = embedding_dim
        self.q_len = q_len
        self.kv_len = kv_len
        # embedding_dim must split evenly across heads
        assert heads * self.head_dim == embedding_dim
        self.w_q = nn.Linear(self.head_dim, self.head_dim)
        self.w_k = nn.Linear(self.head_dim, self.head_dim)
        self.w_v = nn.Linear(self.head_dim, self.head_dim)
        self.w_o = nn.Linear(embedding_dim, embedding_dim)
        # fixed mixing coefficient for the two softmax maps
        self.lambda_init = 0.6364809241795925
        # NOTE(review): the four lambda parameters below are unused in forward
        self.lambda_q1 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k1 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_q2 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k2 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.drop_out = nn.Dropout(drop_out)
        self.ln = RMSNorm(self.head_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.heads = heads

        # temperature based on the half head dim used by each energy term
        self.sqrt_d_out = sqrt(self.head_dim // 2)

    def rope(self, x, seq_len):
        """Rotary position embedding over the first ``seq_len`` positions.

        Assumes x's second dimension equals seq_len, so the
        ``x[:, seq_len:, :]`` tails are empty slices. Caches are rebuilt on
        CPU each call (NOTE(review): candidate for precomputed buffers).
        """
        base = 10000
        d = self.embedding_dim
        inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
        seq_id = torch.arange(seq_len).float()
        id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
        # duplicate so the cache spans the full embedding width
        id_theta2 = torch.concat((id_theta, id_theta), dim=1)
        cos_cache = id_theta2.cos().to(x.device)
        sin_cache = id_theta2.sin().to(x.device)

        def rotate_half(_x):
            # (x1, x2) -> (-x2, x1): the 90-degree rotation used by RoPE
            x1 = _x[..., :d // 2]
            x2 = _x[..., d // 2:]
            return torch.concat([-x2, x1], dim=-1)

        rotated_x = rotate_half(x)
        return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self, q , kv):
        """Differential cross-attention of q over kv.

        :param q: (batch, q_len, embedding_dim)
        :param kv: (batch, kv_len, embedding_dim)
        :return: (output, attention) — output is (batch, q_len, embedding_dim);
                 attention is the (batch, heads, q_len, kv_len) pre-dropout map
        """
        b = q.shape[0]
        queries = self.rope(q, self.q_len).view(b, -1, self.heads, self.head_dim)
        keys = self.rope(kv, self.kv_len).view(b, -1, self.heads, self.head_dim)
        values = kv.view(b, -1, self.heads, self.head_dim)

        # split Q/K into two halves for the two attention maps
        q1,q2 = self.w_q(queries).split(self.head_dim // 2,dim=-1)
        k1,k2 = self.w_k(keys).split(self.head_dim // 2,dim=-1)
        values = self.w_v(values)

        energy1 = torch.einsum("nqhd,nkhd->nhqk", [q1, k1])
        energy2 = torch.einsum("nqhd,nkhd->nhqk", [q2, k2])
        # difference of softmax maps, weighted by the fixed lambda
        # NOTE(review): this difference is softmaxed *again* below — unusual;
        # confirm against the intended differential-attention formulation.
        energy = self.softmax(energy1) - self.lambda_init * self.softmax(energy2)

        atte = self.softmax(energy / self.sqrt_d_out)
        # keep the pre-dropout attention map for the caller
        ret_atte = atte
        atte = self.drop_out(atte)
        out = torch.einsum("nhqk,nkhd->nqhd", [atte, values])
        # per-head RMSNorm and fixed output scaling
        out = self.ln(out) * (1- self.lambda_init)
        out = out.reshape(b, self.q_len, self.embedding_dim)
        return self.w_o(out), ret_atte

class RMSNorm(nn.Module):
    """Root-mean-square layer norm: x / sqrt(mean(x^2) + eps), with a learned gain."""

    def __init__(self, normalized_shape, eps=1e-8):
        super(RMSNorm, self).__init__()
        self.eps = eps
        shape = (normalized_shape,) if isinstance(normalized_shape, int) else normalized_shape
        self.weight = nn.Parameter(torch.ones(shape))

    def forward(self, x):
        # normalize by the RMS of the last dimension, then apply the gain
        rms = torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)

class MLP(nn.Module):
    """SwiGLU-style feed-forward block: down(silu(gate(x)) * up(x)), 3x expansion."""

    def __init__(self, in_feature):
        super().__init__()
        self.ln1 = nn.Linear(in_feature, in_feature*3, bias=False)  # gate projection
        self.ln2 = nn.Linear(in_feature, in_feature*3, bias=False)  # up projection
        self.activate_fn = nn.SiLU()
        self.ln3 = nn.Linear(in_feature*3, in_feature, bias=False)  # down projection

    def forward(self, x):
        gate = self.activate_fn(self.ln1(x))
        up = self.ln2(x)
        return self.ln3(up * gate)

class Casual_Transformer(nn.Module):
    """Pre-norm cross-attention transformer block.

    Wraps either Cross_Attention or Diff_Attention plus a gated MLP, with
    RMSNorm before each sub-layer and residual connections around both.
    (Class name kept for compatibility; "Causal" was likely intended.)
    """

    def __init__(self,
                 embedding_dim,
                 heads,
                 kv_len,
                 q_len,
                 drop_out=0.2,
                 diff=False
                 ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.heads = heads
        self.kv_len = kv_len
        self.q_len = q_len
        self.atten_norm = RMSNorm(embedding_dim)
        self.mlp_norm = RMSNorm(embedding_dim)
        self.diff = diff 
        if not diff:
            self.mmhsa = Cross_Attention(embedding_dim,
                                        q_len,
                                        kv_len,
                                        heads,
                                        drop_out=drop_out)
        else:
            self.mmhsa = Diff_Attention(embedding_dim ,
                                        q_len,
                                        kv_len,
                                        heads,
                                        drop_out=drop_out)
        self.mlp = MLP(embedding_dim)

    def forward(self, qkv):
        """Apply one pre-norm attention + MLP block.

        :param qkv: (q, kv) or (q, kv, atte) tuple; a trailing attention map
                    from a previous block is accepted for chaining but is
                    recomputed here.
        :return: (q, kv, atte) so blocks can be stacked, e.g. in nn.Sequential
        """
        if len(qkv) == 2:
            q, kv = qkv
        elif len(qkv) == 3:
            q, kv, _ = qkv
        else:
            # Bug fix: was a bare exit(), which killed the whole process from
            # library code and hid the error from the caller.
            raise ValueError(
                'qkv must be a (q, kv) or (q, kv, atte) tuple, got length {}'.format(len(qkv))
            )
        hidden_state, atte = self.mmhsa(self.atten_norm(q), self.atten_norm(kv))
        hidden_state = q + hidden_state
        q = hidden_state + self.mlp(self.mlp_norm(hidden_state))
        return q, kv, atte

class RegLoss(nn.Module):
    """L2 regularization: sum of the 2-norms of the given parameter tensors."""

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, parameters):
        """Return sum(||W||_2 for W in parameters); None when parameters is empty."""
        norms = [w.norm(2) for w in parameters]
        if not norms:
            return None
        total = norms[0]
        for term in norms[1:]:
            total = total + term
        return total

class BPRLoss(nn.Module):
    """BPRLoss, based on Bayesian Personalized Ranking.

    Pushes pos_score above neg_score via -log(gamma + sigmoid(pos - neg)).

    Args:
        - gamma(float): Small value to avoid division by zero

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as the Pos_score
        - Output: scalar.

    Examples::

        >>> loss = BPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self, gamma=1e-10):
        super(BPRLoss, self).__init__()
        self.gamma = gamma  # numerical floor inside the log

    def forward(self, pos_score, neg_score):
        margin = torch.sigmoid(pos_score - neg_score)
        return -torch.log(self.gamma + margin).mean()

if __name__ == '__main__':
    # Smoke test: the compression loss should carry gradients while the pooled
    # embedding should not (rewards are computed under no_grad by default).
    compre = CompressionPolicyNetwork_MLP(20, 9, 20)
    item_embeds = torch.randn(5, 20, 20, requires_grad=True)
    # Bug fix: the original ran three separate forward passes (each sampling a
    # different boundary set) just to print; run once and reuse the result.
    avg_emb, compress_loss = compre(item_embeds)
    print(compress_loss.requires_grad,
          avg_emb.requires_grad,
          avg_emb.shape)