from math import sqrt
import numpy as np
import torch
import torch.nn as nn
import torch
import torch.nn.functional as F
from collections import Counter

class Embedding(nn.Module):
    """Embeds every categorical feature of a behaviour sequence and fuses the
    concatenated embeddings through a small MLP.

    Features whose name contains 'id' (high-cardinality) get
    id_embedding_dim-wide tables; all other features get
    simple_embedding_dim-wide tables. ``h`` / ``c`` record the total embedded
    width contributed by inherent vs. cross features respectively.
    """

    def __init__(self,
                 vocab_size_dic,
                 id_embedding_dim=64,
                 simple_embedding_dim=8,
                 ):
        """
        :param vocab_size_dic: per-feature vocab sizes, e.g.
            {
                'inherent_feature': {'item_id': vocab_num, 'author': vocab_num, ...},
                'cross_feature': {'play_time': vocab_num, 'date': vocab_num, ...}
            }
        :param id_embedding_dim: embedding width for 'id' (large-vocab) features
        :param simple_embedding_dim: embedding width for all other features
        """
        super(Embedding, self).__init__()
        self.vocab_size_dic = vocab_size_dic
        self.simple_embedding_dim = simple_embedding_dim
        self.id_embedding_dim = id_embedding_dim

        specs = []  # [(vocab_num, embedding_dim), ...] in feature order
        totals = {'inherent_feature': 0, 'cross_feature': 0}
        for group in ('inherent_feature', 'cross_feature'):
            for feature, vocab_num in vocab_size_dic[group].items():
                width = id_embedding_dim if 'id' in feature else simple_embedding_dim
                specs.append((vocab_num, width))
                totals[group] += width

        self.h = totals['inherent_feature']
        self.c = totals['cross_feature']
        self.vocab_size_list = [vocab_num for vocab_num, _ in specs]

        self.embed_layer_dic = nn.ModuleDict()
        for idx, (vocab_num, width) in enumerate(specs):
            # high-cardinality tables carry a descriptive key; others keep the index
            if width == id_embedding_dim:
                key = 'item_id_embedding_{}'.format(idx)
            else:
                key = str(idx)
            self.embed_layer_dic[key] = nn.Embedding(vocab_num, width)
        self.mlp = nn.Sequential(
            nn.Linear(self.h + self.c, self.h + self.c),
            nn.ReLU6()
        )

    def forward(self, s):
        """
        :param s: integer tensor (batch, behaviors_num, behavior_feature_num);
                  column i holds the ids consumed by the i-th embedding table.
        :return: (batch, behaviors_num, h + c) fused embedding
        """
        feature_count = s.shape[-1]
        pieces = []
        # ModuleDict preserves insertion order, so tables align with columns.
        for col, layer in enumerate(self.embed_layer_dic.values()):
            if col >= feature_count:
                break
            ids = s.narrow(-1, start=col, length=1)
            pieces.append(layer(ids).squeeze(-2))
        return self.mlp(torch.cat(pieces, dim=-1))



class Self_Attention(nn.Module):
    """Multi-head self-attention with rotary position embeddings (RoPE).

    RoPE is applied to the query/key streams only; values are left unrotated.
    NOTE(review): w_q/w_k/w_v are Linear(head_dim, head_dim) applied after the
    head split, so every head shares the same projection weights -- confirm
    this weight sharing is intentional.
    """

    def __init__(self,
                 embedding_dim,
                 sequence_len,
                 heads,
                 drop_out=0.2,
                 ):
        super().__init__()
        self.head_dim = embedding_dim // heads
        self.embedding_dim = embedding_dim

        # embedding_dim must split evenly across heads
        assert heads * self.head_dim == embedding_dim
        self.w_q = nn.Linear(self.head_dim, self.head_dim)
        self.w_k = nn.Linear(self.head_dim, self.head_dim)
        self.w_v = nn.Linear(self.head_dim, self.head_dim)
        self.w_o = nn.Linear(embedding_dim, embedding_dim)
        self.drop_out = nn.Dropout(drop_out)
        self.softmax = nn.Softmax(dim=-1)
        self.heads = heads
        self.sequence_len = sequence_len
        # 1/sqrt(d_head) scaling constant for the attention logits
        self.sqrt_d_out = sqrt(self.head_dim)

    def rope(self, x):
        """Apply rotary position embedding to the first sequence_len positions.

        x: (batch, seq, embedding_dim); returns a tensor of the same shape.
        """
        base = 10000
        d = self.embedding_dim
        # per-channel-pair inverse frequencies: base^(-2i/d)
        inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
        seq_len = self.sequence_len
        seq_id = torch.arange(seq_len).float()
        # outer product: angle theta_{m,i} = position m * inv_freq_i
        id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
        # duplicate so the cache covers all d channels (rotate-half layout)
        id_theta2 = torch.concat((id_theta, id_theta), dim=1)
        cos_cache = id_theta2.cos().to(x.device)
        sin_cache = id_theta2.sin().to(x.device)

        def rotate_half(_x):
            # (x1, x2) -> (-x2, x1): the 90-degree pairwise rotation of RoPE
            x1 = _x[..., :d // 2]
            x2 = _x[..., d // 2:]
            return torch.concat([-x2, x1], dim=-1)

        rotated_x = rotate_half(x)
        # NOTE(review): if x is longer than sequence_len, the tail slice
        # x[:, seq_len:, :] is summed in twice (once per concat), i.e. it comes
        # out doubled -- looks unintended. With seq == sequence_len the tail is
        # empty and this is the standard x*cos + rotate_half(x)*sin.
        return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self, input):
        """input: (batch, sequence_len, embedding_dim) -> same shape."""
        b = input.shape[0]
        # rotate q/k before the head split; values stay position-agnostic
        queries = self.rope(input).view(b, -1, self.heads, self.head_dim)
        keys = self.rope(input).view(b, -1, self.heads, self.head_dim)
        values = input.view(b, -1, self.heads, self.head_dim)

        queries = self.w_q(queries)
        keys = self.w_k(keys)
        values = self.w_v(values)

        # (b, q, heads, d) x (b, k, heads, d) -> (b, heads, q, k) logits
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        atte = self.softmax(energy / self.sqrt_d_out)
        atte = self.drop_out(atte)
        # weighted sum over keys, then merge heads back to embedding_dim
        out = torch.einsum("nhql,nlhd->nqhd", [atte, values]).reshape(
            b, self.sequence_len, self.embedding_dim
        )
        return self.w_o(out)


class Series_Compression(nn.Module):
    """Compress a long sequence by self-attending inside fixed-size windows of
    length ``sequence_len`` and mean-pooling each window to a single vector.
    Output length is input length divided by ``sequence_len``.
    """

    def __init__(self, sequence_len, embedding_dim, heads):
        super().__init__()
        self.sequence_len = sequence_len
        self.embedding_dim = embedding_dim
        self.mhsa = Self_Attention(embedding_dim, sequence_len, heads)

    def forward(self, ts):
        batch = ts.shape[0]
        total_len = ts.shape[-2]
        # input length must be an exact multiple of the window size
        assert total_len % self.sequence_len == 0, f"压缩序列需要被{self.sequence_len}整除"
        num_windows = total_len // self.sequence_len
        # fold every window into the batch axis so attention is windowed
        windows = ts.reshape(batch * num_windows, self.sequence_len, self.embedding_dim)
        pooled = torch.mean(self.mhsa(windows), dim=-2)
        return pooled.reshape(batch, num_windows, self.embedding_dim)


class Cross_Attention(nn.Module):
    """Multi-head cross-attention with RoPE applied to queries and keys.

    q and kv may have different sequence lengths (q_len vs kv_len). forward
    returns both the projected output and the pre-dropout attention map.
    NOTE(review): wq/wk/wv are Linear(head_dim, head_dim) applied after the
    head split, so all heads share the same projection weights -- confirm.
    """

    def __init__(self, 
                 embedding_dim, 
                 q_len, 
                 kv_len,
                 heads,
                 drop_out=0.2):
        super().__init__()
        assert embedding_dim % heads == 0,(
            "embedding_dim 应该能被 heads整除"
        )
        self.embedding_dim = embedding_dim
        self.heads = heads
        self.q_len = q_len
        self.kv_len = kv_len
        self.head_dim = int(embedding_dim / heads)
        self.wq = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.wk = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.wv = nn.Linear(self.head_dim, self.head_dim,bias=False)
        self.dropout = nn.Dropout(drop_out)
        self.wo = nn.Linear(embedding_dim, embedding_dim)
        self.softmax = nn.Softmax(dim=-1)
        # 1/sqrt(d_head) scaling for the attention logits
        self.sqrt_d = sqrt(self.head_dim)

    def rope(self, x, seq_len):
            """Rotary position embedding over the first seq_len positions of x."""
            base = 10000
            d = self.embedding_dim
            # per-channel-pair inverse frequencies: base^(-2i/d)
            inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
            seq_id = torch.arange(seq_len).float()
            id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
            # duplicate so the cache covers all d channels (rotate-half layout)
            id_theta2 = torch.concat((id_theta, id_theta), dim=1)
            cos_cache = id_theta2.cos().to(x.device)
            sin_cache = id_theta2.sin().to(x.device)
            def rotate_half(_x):
                x1 = _x[..., :d // 2]
                x2 = _x[..., d // 2:]
                return torch.concat([-x2, x1], dim=-1)

            rotated_x = rotate_half(x)
            # NOTE(review): positions past seq_len are added in twice (doubled);
            # with input length == seq_len the tail is empty and this reduces to
            # the standard x*cos + rotate_half(x)*sin.
            return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                    + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self,q , kv):
        """q: (b, q_len, embedding_dim); kv: (b, kv_len, embedding_dim).

        Returns (output (b, q_len, embedding_dim), pre-dropout attention map).
        """
        batch_size, seq_len , embedding_dim = kv.size()
        q = self.rope(q, self.q_len).view(batch_size, -1, self.heads, self.head_dim)
        k = self.rope(kv, self.kv_len).view(batch_size, -1, self.heads, self.head_dim)
        v = kv.view(batch_size, -1, self.heads, self.head_dim)
        query = self.wq(q)
        keys = self.wk(k)
        values = self.wv(v)

        # (b, q, heads, d) x (b, k, heads, d) -> (b, heads, q, k)
        atte = torch.einsum("bqhd,bkhd->bhqk",[query,keys])
        atte = self.softmax(atte / self.sqrt_d)
        ret_atte = atte  # captured before dropout so callers see the clean map
        atte = self.dropout(atte)
        out = torch.einsum("bhqk, bkhd->bqhd",[atte,values]).reshape(
            batch_size, self.q_len, self.embedding_dim
        )
        return self.wo(out) ,ret_atte

class Diff_Attention(nn.Module):
    """Multi-head differential cross-attention (cf. the Differential
    Transformer): q/k projections are split in half, two attention maps are
    computed and subtracted to cancel common-mode attention noise. RoPE is
    applied to queries and keys.

    NOTE(review): lambda_q1/k1/q2/k2 are registered as parameters but never
    read in forward -- only the constant lambda_init scales the second map.
    ETA.forward in this file derives lambda_full from the analogous vectors,
    so the re-parameterization here looks unfinished; confirm.
    """

    def __init__(self,
                 embedding_dim,
                 q_len,
                 kv_len,
                 heads,
                 drop_out=0.2
                 ):
        super().__init__()
        self.head_dim = embedding_dim // heads
        self.embedding_dim = embedding_dim
        self.q_len = q_len
        self.kv_len = kv_len
        # embedding_dim must split evenly across heads
        assert heads * self.head_dim == embedding_dim
        self.w_q = nn.Linear(self.head_dim, self.head_dim)
        self.w_k = nn.Linear(self.head_dim, self.head_dim)
        self.w_v = nn.Linear(self.head_dim, self.head_dim)
        self.w_o = nn.Linear(embedding_dim, embedding_dim)
        self.lambda_init = 0.6364809241795925
        self.lambda_q1 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k1 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_q2 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k2 = nn.Parameter(torch.zeros(self.head_dim // 2, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.drop_out = nn.Dropout(drop_out)
        # per-head output norm, as in the diff-transformer formulation
        self.ln = RMSNorm(self.head_dim)
        self.softmax = nn.Softmax(dim=-1)
        self.heads = heads

        # scaled by the half-head width because q/k are split in two below
        self.sqrt_d_out = sqrt(self.head_dim // 2)

    def rope(self, x, seq_len):
        """Rotary position embedding over the first seq_len positions of x."""
        base = 10000
        d = self.embedding_dim
        # per-channel-pair inverse frequencies: base^(-2i/d)
        inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
        seq_id = torch.arange(seq_len).float()
        id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
        # duplicate so the cache covers all d channels (rotate-half layout)
        id_theta2 = torch.concat((id_theta, id_theta), dim=1)
        cos_cache = id_theta2.cos().to(x.device)
        sin_cache = id_theta2.sin().to(x.device)

        def rotate_half(_x):
            x1 = _x[..., :d // 2]
            x2 = _x[..., d // 2:]
            return torch.concat([-x2, x1], dim=-1)

        rotated_x = rotate_half(x)
        # NOTE(review): positions past seq_len are added in twice (doubled);
        # harmless when the input length equals seq_len (empty tail).
        return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))

    def forward(self, q , kv):
        """q: (b, q_len, embedding_dim); kv: (b, kv_len, embedding_dim).

        Returns (output (b, q_len, embedding_dim), pre-dropout attention map).
        """
        b = q.shape[0]
        queries = self.rope(q, self.q_len).view(b, -1, self.heads, self.head_dim)
        keys = self.rope(kv, self.kv_len).view(b, -1, self.heads, self.head_dim)
        values = kv.view(b, -1, self.heads, self.head_dim)

        # split projected q/k into two halves -> two competing attention maps
        q1,q2 = self.w_q(queries).split(self.head_dim // 2,dim=-1)
        k1,k2 = self.w_k(keys).split(self.head_dim // 2,dim=-1)
        values = self.w_v(values)

        energy1 = torch.einsum("nqhd,nkhd->nhqk", [q1, k1])
        energy2 = torch.einsum("nqhd,nkhd->nhqk", [q2, k2])
        # NOTE(review): the two maps are softmax-normalized, subtracted, and
        # then softmax-normalized AGAIN below (with a scale) -- a double
        # normalization that deviates from the diff-transformer paper; confirm
        # this is intended.
        energy = self.softmax(energy1) - self.lambda_init * self.softmax(energy2)

        atte = self.softmax(energy / self.sqrt_d_out)
        ret_atte = atte
        atte = self.drop_out(atte)
        out = torch.einsum("nhqk,nkhd->nqhd", [atte, values])
        out = self.ln(out) * (1- self.lambda_init)
        out = out.reshape(b, self.q_len, self.embedding_dim)
        return self.w_o(out), ret_atte


class RMSNorm(nn.Module):
    """Root-mean-square layer normalization: rescales by the RMS of the last
    dimension (no mean centering) and applies a learned per-channel gain.
    """

    def __init__(self, normalized_shape, eps=1e-8):
        super(RMSNorm, self).__init__()
        self.eps = eps  # numerical floor under the square root
        shape = (normalized_shape,) if isinstance(normalized_shape, int) else normalized_shape
        self.weight = nn.Parameter(torch.ones(shape))

    def forward(self, x):
        rms = torch.sqrt(torch.mean(x ** 2, dim=-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)


class MLP(nn.Module):
    """Gated (SwiGLU-style) feed-forward block: SiLU(gate(x)) * up(x),
    projected back down to the input width. All projections are bias-free.
    """

    def __init__(self, in_feature):
        super().__init__()
        self.ln1 = nn.Linear(in_feature, in_feature*3,bias=False)  # gate projection
        self.ln2 = nn.Linear(in_feature, in_feature*3,bias=False)  # up projection
        self.activate_fn = nn.SiLU()
        self.ln3 = nn.Linear(in_feature*3, in_feature,bias=False)  # down projection

    def forward(self, x):
        gate = self.activate_fn(self.ln1(x))
        up = self.ln2(x)
        return self.ln3(gate * up)


class Casual_Transformer(nn.Module):
    """Pre-norm transformer layer: cross-attention (or differential
    cross-attention when ``diff`` is True) followed by a gated MLP, each with
    a residual connection. forward accepts (q, kv) or (q, kv, stale_atte) and
    returns (q', kv, fresh_atte) so layers can be chained.
    """

    def __init__(self,
                 embedding_dim,
                 heads,
                 kv_len,
                 q_len,
                 drop_out=0.2,
                 diff=False
                 ):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.heads = heads
        self.kv_len = kv_len
        self.q_len = q_len
        self.atten_norm = RMSNorm(embedding_dim)
        self.mlp_norm = RMSNorm(embedding_dim)
        self.diff = diff
        # pick the attention flavour; both share the same call signature
        attention_cls = Diff_Attention if diff else Cross_Attention
        self.mmhsa = attention_cls(embedding_dim, q_len, kv_len, heads, drop_out=drop_out)
        self.mlp = MLP(embedding_dim)

    def forward(self, qkv):
        if len(qkv) == 2:
            q, kv = qkv
        elif len(qkv) == 3:
            # incoming attention map from the previous layer is discarded;
            # this layer returns its own fresh map
            q, kv, _ = qkv
        attn_out, atte = self.mmhsa(self.atten_norm(q), self.atten_norm(kv))
        residual = q + attn_out
        q = residual + self.mlp(self.mlp_norm(residual))
        return q, kv, atte


class ETA(nn.Module):
    """Single-head differential target-attention scorer.

    Produces a (b, q_len, s) attention map over a behaviour sequence by
    combining two bilinear inherent-feature scores (differential attention
    with the lambda re-parameterization of the Differential Transformer)
    plus an optional cross-feature bias.
    """

    def __init__(self, h, c, simple_feature_dim=8):
        """
        :param h: total inherent-feature dim
        :param c: total cross-feature dim (0 disables the cross-feature bias)
        :param simple_feature_dim: width of each individual cross-feature slice
        """
        super(ETA, self).__init__()
        self.simple_feature_dim = simple_feature_dim
        self.wh1 = nn.Linear(h, h)
        self.wh2 = nn.Linear(h, h)
        self.wcs = nn.ModuleList()
        if c != 0:
            j = int(c / simple_feature_dim)
            self.j = j
            # one scalar projection per cross-feature slice
            for i in range(j):
                self.wcs.append(nn.Linear(simple_feature_dim, 1))
            self.beta = nn.Linear(j, 1)
        self.wq1 = nn.Linear(h, h)
        self.wq2 = nn.Linear(h, h)
        self.lambda_init = 0.6364809241795925
        self.lambda_q1 = nn.Parameter(torch.zeros(h, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k1 = nn.Parameter(torch.zeros(h, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_q2 = nn.Parameter(torch.zeros(h, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.lambda_k2 = nn.Parameter(torch.zeros(h, dtype=torch.float32).normal_(mean=0, std=0.1))
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, q, kh, kc):
        """
        :param q:  b, q_len, h  query-side inherent features
        :param kh: b, s, h      key-side inherent features
        :param kc: b, s, c      key-side cross features (ignored when c == 0)
        :return: b, q_len, s differential attention weights

        BUGFIX: the original called ``Tensor.permute(1, 2)`` on 3-D tensors,
        which raises ("number of dims don't match in permute") -- permute
        requires a permutation of ALL dimensions. The intended
        (b, q, h) -> (b, h, q) swap is ``transpose(1, 2)``: scores are built
        as (b, s, q) via bmm and transposed to (b, q, s) at the end, matching
        how ESU consumes them (``cur_alpha.bmm(w_v(K))``).
        """
        dk = kh.shape[-1]
        output_h1 = self.wh1(kh)  # b, s, h
        output_h2 = self.wh2(kh)  # b, s, h
        output_q1 = self.wq1(q)   # b, q, h
        output_q2 = self.wq2(q)   # b, q, h
        output_c_list = []
        index = 0
        if len(self.wcs) != 0:
            # project each simple_feature_dim slice of kc down to one scalar
            for wc in self.wcs:
                output_c_list.append(wc(kc.narrow(dim=-1, start=index, length=self.simple_feature_dim)))  # b, s, 1
                index += self.simple_feature_dim
            output_c = torch.cat(output_c_list, dim=-1)  # b, s, j
            # bmm: (b, s, h) @ (b, h, q) -> (b, s, q); cross bias broadcasts over q
            alpha1 = output_h1.bmm(output_q1.transpose(1, 2)) / sqrt(dk) + self.beta(output_c)
            alpha2 = output_h2.bmm(output_q2.transpose(1, 2)) / sqrt(dk) + self.beta(output_c)
        else:
            alpha1 = output_h1.bmm(output_q1.transpose(1, 2)) / sqrt(dk)
            alpha2 = output_h2.bmm(output_q2.transpose(1, 2)) / sqrt(dk)
        alpha1 = alpha1.transpose(1, 2)  # b, q, s
        alpha2 = alpha2.transpose(1, 2)  # b, q, s
        # differential attention: lambda re-parameterized from learned vectors
        lambda_1 = torch.exp(torch.sum(self.lambda_q1 * self.lambda_k1, dim=-1).float()).type_as(q)
        lambda_2 = torch.exp(torch.sum(self.lambda_q2 * self.lambda_k2, dim=-1).float()).type_as(q)
        lambda_full = lambda_1 - lambda_2 + self.lambda_init
        alpha = self.softmax(alpha1) - lambda_full * self.softmax(alpha2)
        return alpha  # b, q, s


class ESU(nn.Module):
    """Multi-head ETA scoring unit: each head produces an attention map over
    the behaviour sequence, attends into a shared value projection, and the
    per-head outputs are mixed back down with a linear layer over the head
    axis. Returns (output, mean attention map over heads).
    """

    def __init__(self,
                 h,
                 c,
                 heads,
                 simple_embedding_dim = 8
                 ):
        super().__init__()
        self.h = h  # total inherent-feature width
        self.c = c  # total cross-feature width
        self.heads = heads
        self.multi_head_etas = get_multi_head_etas(heads, h,c, simple_embedding_dim=simple_embedding_dim)
        self.embedding_dim= h+c
        self.w_o = nn.Linear(heads, 1, bias=False)
        self.w_v = nn.Linear(self.h + self.c, self.h + self.c)

    def forward(self,q, kv):
        """q: (b, q_len, h) queries; kv: (b, s, h+c) keys/values."""
        b = q.shape[0]
        # split kv into inherent (first h dims) and cross (last c dims) parts
        kh = torch.narrow(kv, dim=-1, start=0, length=self.h)
        kc = torch.narrow(kv, dim=-1, start=self.h, length=self.c)
        # NOTE(review): torch.narrow always returns a tensor, so kc is never
        # None and the else branch below is dead code; K == kv either way.
        if kc is not None:
            K = torch.cat((kh, kc), dim=-1)  # b, k, h+c
        else:
            K = kh
        head_list = []
        alpha_list = []
        for multi_head_eta in self.multi_head_etas:
            cur_alpha = multi_head_eta(q, kh, kc) # b ,q, s
            alpha_list.append(cur_alpha.unsqueeze(1))
            head_list.append(cur_alpha.bmm(self.w_v(K)))  # w_v(k) : b, s, embedding_dim  head_list: [ tensor(b ,q, out_channel) , ... ]
        # NOTE(review): cat along dim=1 then reshape(b, -1, heads) interleaves
        # values across heads rather than stacking heads on a trailing axis --
        # confirm this mixing order is intended before refactoring.
        q = self.w_o(torch.cat(head_list, dim=1).reshape(b, -1, self.heads)).squeeze(-1) 
        return q.reshape(b, -1, self.embedding_dim), torch.concat(alpha_list, dim = 1).mean(dim=1)


def get_multi_head_etas(heads, h, c, simple_embedding_dim):
    """Build one independent ETA scorer per attention head."""
    etas = [ETA(h, c, simple_embedding_dim) for _ in range(heads)]
    return nn.ModuleList(etas)



class Intersection(nn.Module):
    """Attention-weighted intersection of two aligned embedding stacks.

    Both inputs are (N, B, d); a softmax over the B axis (driven by the
    concatenated pair) weights each slot, and the weighted sums (N, d) of
    alpha and beta are returned.
    """

    def __init__(self, dim):
        super(Intersection, self).__init__()
        self.dim = dim
        self.feature_layer_1 = nn.Linear(self.dim * 2, self.dim, bias=False)
        # NOTE: feature_layer_2 is initialized but not used in forward
        self.feature_layer_2 = nn.Linear(2 * self.dim, self.dim, bias=False)

        nn.init.xavier_uniform_(self.feature_layer_1.weight)
        nn.init.xavier_uniform_(self.feature_layer_2.weight)

    def forward(self, alpha, beta):
        # (N, B, d) + (N, B, d) -> (N, B, 2d)
        combined = torch.cat([alpha, beta], dim=-1)
        # softmax over the B axis yields per-slot mixing weights
        weights = F.softmax(self.feature_layer_1(combined), dim=1)
        pooled_alpha = torch.sum(weights * alpha, dim=1)
        pooled_beta = torch.sum(weights * beta, dim=1)
        return pooled_alpha, pooled_beta


class Negation(nn.Module):
    """Logical NOT operator: negates a logic value by taking its reciprocal."""

    def __init__(self):
        super(Negation, self).__init__()

    def neg_feature(self, feature):
        # Intended remap f' = (f + 2L) % (2L) - L with L=1, keeping f in
        # [-L, L]; the remapping is currently disabled, so this is the
        # identity function.
        return feature

    def forward(self, logic):
        # reciprocal negation: logic -> 1 / logic
        return 1. / logic

class Regularizer():
    """Shift embeddings by a constant and clamp them into [min_val, max_val]."""

    def __init__(self, base_add, min_val, max_val):
        self.base_add = base_add  # constant offset applied before clamping
        self.min_val = min_val
        self.max_val = max_val

    def __call__(self, entity_embedding):
        shifted = entity_embedding + self.base_add
        return torch.clamp(shifted, self.min_val, self.max_val)


class AbstractSampler(object):
    """:class:`AbstractSampler` is a abstract class, all sampler should inherit from it. This sampler supports returning
    a certain number of random value_ids according to the input key_id, and it also supports to prohibit
    certain key-value pairs by setting used_ids.

    Args:
        distribution (str): The string of distribution, which is used for subclass.

    Attributes:
        used_ids (numpy.ndarray): The result of :meth:`get_used_ids`.
    """

    def __init__(self, distribution, alpha):
        # placeholder; set_distribution() assigns the real value (and builds
        # the alias table when distribution == "popularity")
        self.distribution = ""
        self.alpha = alpha  # popularity-smoothing exponent used in _build_alias_table
        self.set_distribution(distribution)
        self.used_ids = self.get_used_ids()  # provided by the subclass

    def set_distribution(self, distribution):
        """Set the distribution of sampler.

        Args:
            distribution (str): Distribution of the negative items.
        """
        self.distribution = distribution
        if distribution == "popularity":
            self._build_alias_table()

    def _uni_sampling(self, sample_num):
        """Sample [sample_num] items in the uniform distribution.

        Args:
            sample_num (int): the number of samples.

        Returns:
            sample_list (np.array): a list of samples.
        """
        raise NotImplementedError("Method [_uni_sampling] should be implemented")

    def _get_candidates_list(self):
        """Get sample candidates list for _pop_sampling()

        Returns:
            candidates_list (list): a list of candidates id.
        """
        raise NotImplementedError("Method [_get_candidates_list] should be implemented")

    def _build_alias_table(self):
        """Build alias table for popularity_biased sampling."""
        # Standard alias-method (Walker/Vose) construction: each bucket keeps
        # a residual probability plus one "alias" id to fall back to.
        candidates_list = self._get_candidates_list()
        self.prob = dict(Counter(candidates_list))
        self.alias = self.prob.copy()
        large_q = []
        small_q = []
        for i in self.prob:
            self.alias[i] = -1
            self.prob[i] = self.prob[i] / len(candidates_list)
            # alpha < 1 flattens the raw popularity distribution
            self.prob[i] = pow(self.prob[i], self.alpha)
        normalize_count = sum(self.prob.values())
        for i in self.prob:
            # rescale so the average bucket probability is exactly 1
            self.prob[i] = self.prob[i] / normalize_count * len(self.prob)
            if self.prob[i] > 1:
                large_q.append(i)
            elif self.prob[i] < 1:
                small_q.append(i)
        while len(large_q) != 0 and len(small_q) != 0:
            l = large_q.pop(0)
            s = small_q.pop(0)
            # donate mass from a large bucket to top up a small one
            self.alias[s] = l
            self.prob[l] = self.prob[l] - (1 - self.prob[s])
            if self.prob[l] < 1:
                small_q.append(l)
            elif self.prob[l] > 1:
                large_q.append(l)

    def _pop_sampling(self, sample_num):
        """Sample [sample_num] items in the popularity-biased distribution.

        Args:
            sample_num (int): the number of samples.

        Returns:
            sample_list (np.array): a list of samples.
        """

        keys = list(self.prob.keys())
        random_index_list = np.random.randint(0, len(keys), sample_num)
        random_prob_list = np.random.random(sample_num)
        final_random_list = []

        # alias-method draw: pick a bucket uniformly, keep it with its
        # residual probability, otherwise emit its alias. Buckets whose prob
        # stayed exactly 1 keep alias -1, but since random() < 1 their test
        # always succeeds, so -1 is never returned.
        for idx, prob in zip(random_index_list, random_prob_list):
            if self.prob[keys[idx]] > prob:
                final_random_list.append(keys[idx])
            else:
                final_random_list.append(self.alias[keys[idx]])

        return np.array(final_random_list)

    def sampling(self, sample_num):
        """Sampling [sample_num] item_ids.

        Args:
            sample_num (int): the number of samples.

        Returns:
            sample_list (np.array): a list of samples and the len is [sample_num].
        """
        if self.distribution == "uniform":
            return self._uni_sampling(sample_num)
        elif self.distribution == "popularity":
            return self._pop_sampling(sample_num)
        else:
            raise NotImplementedError(
                f"The sampling distribution [{self.distribution}] is not implemented."
            )

    def get_used_ids(self):
        """
        Returns:
            numpy.ndarray: Used ids. Index is key_id, and element is a set of value_ids.
        """
        raise NotImplementedError("Method [get_used_ids] should be implemented")

    def sample_by_key_ids(self, key_ids, num):
        """Sampling by key_ids.

        Args:
            key_ids (numpy.ndarray or list): Input key_ids.
            num (int): Number of sampled value_ids for each key_id.

        Returns:
            torch.tensor: Sampled value_ids.
            value_ids[0], value_ids[len(key_ids)], value_ids[len(key_ids) * 2], ..., value_id[len(key_ids) * (num - 1)]
            is sampled for key_ids[0];
            value_ids[1], value_ids[len(key_ids) + 1], value_ids[len(key_ids) * 2 + 1], ...,
            value_id[len(key_ids) * (num - 1) + 1] is sampled for key_ids[1]; ...; and so on.
        """
        key_ids = np.array(key_ids)
        key_num = len(key_ids)
        total_num = key_num * num
        if (key_ids == key_ids[0]).all():
            # fast path: all keys identical, so one shared used-set applies;
            # rejection-resample any draw that falls in the used set
            key_id = key_ids[0]
            used = np.array(list(self.used_ids[key_id]))
            value_ids = self.sampling(total_num)
            check_list = np.arange(total_num)[np.isin(value_ids, used)]
            while len(check_list) > 0:
                value_ids[check_list] = value = self.sampling(len(check_list))
                mask = np.isin(value, used)
                check_list = check_list[mask]
        else:
            # general path: per-position used-sets, resample until every slot
            # holds a value not used by its key
            value_ids = np.zeros(total_num, dtype=np.int64)
            check_list = np.arange(total_num)
            key_ids = np.tile(key_ids, num)
            while len(check_list) > 0:
                value_ids[check_list] = self.sampling(len(check_list))
                check_list = np.array(
                    [
                        i
                        for i, used, v in zip(
                            check_list,
                            self.used_ids[key_ids[check_list]],
                            value_ids[check_list],
                        )
                        if v in used
                    ]
                )
        return torch.tensor(value_ids)


class LogicalSampler():
    """Negative sampler for sequence models.

    NOTE(review): `_uni_sampling` reads `self.item_num`, which is never set in
    this class -- it must be assigned by a subclass or caller before sampling,
    otherwise an AttributeError is raised. TODO confirm.
    """

    def __init__(self):
        super().__init__()

    def _uni_sampling(self, seq_num):
        # uniform ids in [1, item_num) -- id 0 presumably is padding; verify.
        return np.random.randint(1, self.item_num, seq_num)


    def sample_neg_sequence(self, pos_sequence, sample_num):
        """For each moment, sampling 'sample_num' item from all the items except the one the user clicked on at that moment.

        Args:
            pos_sequence (torch.Tensor):  all users' item history sequence, with the shape of `(N, )`.

        Returns:
            torch.tensor : all users' negative item history sequence.

        """
        total_num = len(pos_sequence)
        value_ids = []
        for i in range(sample_num):
            check_list = np.arange(total_num)
            tem_ids = np.zeros(total_num, dtype=np.int64)
            # rejection-sample until no drawn id equals the positive item at
            # the same position
            while len(check_list) > 0:
                tem_ids[check_list] = self._uni_sampling(len(check_list))
                check_index = np.where(tem_ids[check_list] == pos_sequence[check_list])
                check_list = check_list[check_index]
            # NOTE(review): this prepends the new negatives onto pos_sequence,
            # so later rounds compare against an already-widened tensor; also
            # .cuda() hard-requires a GPU. Both look worth confirming.
            pos_sequence = torch.cat([torch.tensor(tem_ids.reshape(total_num, 1)).cuda(), pos_sequence], dim=-1)
            value_ids.append(tem_ids)

        value_ids = torch.tensor(np.array(value_ids)).t().cuda()
        return value_ids
class RegLoss(nn.Module):
    """L2 regularization over an iterable of parameter tensors.

    Returns the sum of the 2-norms of each tensor, or None when the iterable
    is empty (matching the original contract).
    """

    def __init__(self):
        super(RegLoss, self).__init__()

    def forward(self, parameters):
        total = None
        for weight in parameters:
            penalty = weight.norm(2)
            total = penalty if total is None else total + penalty
        return total


class BPRLoss(nn.Module):
    """BPRLoss, based on Bayesian Personalized Ranking:

        loss = -mean(log(gamma + sigmoid(pos_score - neg_score)))

    Args:
        - gamma(float): Small value to avoid division by zero

    Shape:
        - Pos_score: (N)
        - Neg_score: (N), same shape as the Pos_score
        - Output: scalar.

    Examples::

        >>> loss = BPRLoss()
        >>> pos_score = torch.randn(3, requires_grad=True)
        >>> neg_score = torch.randn(3, requires_grad=True)
        >>> output = loss(pos_score, neg_score)
        >>> output.backward()
    """

    def __init__(self, gamma=1e-10):
        super(BPRLoss, self).__init__()
        self.gamma = gamma  # numerical floor inside the log

    def forward(self, pos_score, neg_score):
        margin = pos_score - neg_score
        return -torch.log(self.gamma + torch.sigmoid(margin)).mean()