import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math


class Embedding(nn.Module):
    def __init__(self,
                 vocab_size_dic,
                 id_embedding_dim=64,
                 simple_embedding_dim=8,
                 ):
        """Embed each categorical feature of a behavior sequence and fuse them with an MLP.

        :param vocab_size_dic: dict holding each behavior feature and its vocab size,
                               format -->{
                                       'inherent_feature':{'item_id':vocab_num, 'author':vocab_num,...},
                                       'cross_feature':{'play_time':vocab_num, 'date':vocab_num,...}
                                   }
        :param id_embedding_dim: embedding dim for high-cardinality ("id"-like) features
        :param simple_embedding_dim: embedding dim for low-cardinality features
        """
        super(Embedding, self).__init__()
        self.vocab_size_dic = vocab_size_dic
        self.simple_embedding_dim = simple_embedding_dim
        self.id_embedding_dim = id_embedding_dim

        # (vocab_num, embedding_dim) per feature, in iteration order:
        # inherent features first, then cross features. Feature order here
        # must match the column order of the forward() input.
        vocab_num_list = []
        h = 0  # total embed width contributed by inherent features
        for feature, vocab_num in vocab_size_dic['inherent_feature'].items():
            # Features whose name contains 'id' get the wide embedding.
            dim = id_embedding_dim if 'id' in feature else simple_embedding_dim
            vocab_num_list.append((vocab_num, dim))
            h += dim

        c = 0  # total embed width contributed by cross features
        for feature, vocab_num in vocab_size_dic['cross_feature'].items():
            dim = id_embedding_dim if 'id' in feature else simple_embedding_dim
            vocab_num_list.append((vocab_num, dim))
            c += dim

        self.h = h
        self.c = c
        self.vocab_size_list = [vocab_num for vocab_num, _ in vocab_num_list]

        # One nn.Embedding per feature. Key naming is kept exactly as before
        # ('item_id_embedding_{idx}' for wide features, str(idx) otherwise)
        # so existing checkpoints keep loading.
        self.embed_layer_dic = nn.ModuleDict()
        for idx, (vocab_num, embed_dim) in enumerate(vocab_num_list):
            if embed_dim == id_embedding_dim:
                layer_name = 'item_id_embedding_{}'.format(idx)
            else:
                layer_name = str(idx)
            self.embed_layer_dic[layer_name] = nn.Embedding(vocab_num, embed_dim)
        self.mlp = nn.Sequential(
            nn.Linear(h + c, h + c),
            nn.ReLU6()
        )

    def forward(self, s):
        """Look up one embedding per feature column and fuse them.

        :param s: LongTensor of shape (b, behaviors_num, behavior_feature_num);
                  column i holds the ids of the i-th feature, in the same order
                  the embedding layers were built (inherent then cross).
        :return: tensor of shape (b, behaviors_num, h + c)
        """
        embed_list = []
        col = 0
        # ModuleDict iterates in insertion order, matching feature columns.
        for embed_layer in self.embed_layer_dic.values():
            # Slice out feature column `col`, keeping a length-1 dim for lookup.
            feature_ids = s.narrow(-1, start=col, length=1)
            embed_list.append(embed_layer(feature_ids).squeeze(-2))
            col += 1
            if col >= s.shape[-1]:
                # Defensive: stop if the input carries fewer feature columns
                # than there are configured embedding layers.
                break
        final_embed = torch.cat(embed_list, dim=-1)
        return self.mlp(final_embed)


class Self_Attention(nn.Module):
    """Multi-head self-attention with rotary position embedding (RoPE) on Q/K.

    The Q/K/V projections are per-head Linear layers acting on head_dim-sized
    slices (applied after the reshape to (b, seq, heads, head_dim)). With
    ``with_mask=True`` attention is causal over ``sequence_len`` history
    tokens followed by ``q_num`` appended query tokens (see ``get_mask``).
    """
    def __init__(self,
                 embedding_dim,
                 sequence_len,
                 heads,
                 q_num = 0,
                 with_mask=False,
                 drop_out=0.2,
                 ):
        """
        :param embedding_dim: total model width; must be divisible by heads
        :param sequence_len: history length; rope() rotates exactly this many
                             leading positions
        :param q_num: number of query tokens appended after the history
                      (only added to the total length when with_mask=True)
        :param with_mask: build and apply the causal mask from get_mask()
        :param drop_out: dropout probability on the attention weights
        """
        super().__init__()
        self.head_dim = embedding_dim // heads
        self.embedding_dim = embedding_dim
        self.q_num = q_num

        # embedding_dim must split evenly across heads
        assert heads * self.head_dim == embedding_dim
        # Per-head projections over the last (head_dim) axis.
        self.w_q = nn.Linear(self.head_dim, self.head_dim)
        self.w_k = nn.Linear(self.head_dim, self.head_dim)
        self.w_v = nn.Linear(self.head_dim, self.head_dim)
        self.w_o = nn.Linear(embedding_dim, embedding_dim)
        self.drop_out = nn.Dropout(drop_out)
        self.softmax = nn.Softmax(dim=-1)
        self.with_mask = with_mask
        self.heads = heads
        self.sequence_len = sequence_len


        if with_mask :
            # Masked mode: input is expected to be history + q_num query tokens.
            self.total_sequence_len = sequence_len + q_num
            self.input_mask = self.get_mask()
        else:
            self.total_sequence_len = sequence_len
            self.input_mask = None


        # Scaling factor for the attention logits (sqrt of per-head dim).
        self.sqrt_d_out = math.sqrt(self.head_dim)

    def rope(self, x):
        """Apply rotary position embedding to the first ``sequence_len``
        positions of ``x``.

        Assumes x is (b, total_len, embedding_dim) — the rotation happens on
        the full embedding dim, before the head split in forward().

        NOTE(review): positions past sequence_len appear in BOTH summands of
        the return expression, so the tail comes out as 2*x — a single
        pass-through looks more likely to be the intent; confirm.
        """
        base = 10000
        d = self.embedding_dim
        # Standard RoPE inverse frequencies over channel pairs: (d/2,).
        inv_freq = 1.0 / (base ** (torch.arange(0, d, 2) / d).float())
        seq_len = self.sequence_len
        seq_id = torch.arange(seq_len).float()
        # Outer product position x frequency -> rotation angles (seq_len, d/2).
        id_theta = torch.einsum("m,d->md", seq_id, inv_freq)
        # Duplicate so the angles cover the full embedding dim: (seq_len, d).
        id_theta2 = torch.concat((id_theta, id_theta), dim=1)
        cos_cache = id_theta2.cos().to(x.device)
        sin_cache = id_theta2.sin().to(x.device)

        def rotate_half(_x):
            # Map (x1, x2) -> (-x2, x1) across the two halves of the last dim.
            x1 = _x[..., :d // 2]
            x2 = _x[..., d // 2:]
            return torch.concat([-x2, x1], dim=-1)

        rotated_x = rotate_half(x)
        # x*cos + rotate_half(x)*sin on the first seq_len positions.
        return (torch.concat([x[:,:seq_len,:]*cos_cache, x[:,seq_len:,:]],dim=1)
                + torch.concat([rotated_x[:,:seq_len,:]*sin_cache, x[:,seq_len:,:]], dim=1))



    def forward(self, input):
        """
        :param input: (b, total_sequence_len, embedding_dim)
        :return: (output, attention): output is (b, total_sequence_len,
                 embedding_dim); attention is the pre-dropout softmax map
                 of shape (b, heads, total_len, total_len).
        """
        b = input.shape[0]
        # RoPE is applied to queries and keys only; values use the raw input.
        queries = self.rope(input).view(b, -1, self.heads, self.head_dim)
        keys = self.rope(input).view(b, -1, self.heads, self.head_dim)
        values = input.view(b, -1, self.heads, self.head_dim)

        queries = self.w_q(queries)
        keys = self.w_k(keys)
        values = self.w_v(values)
        # Dot-product logits per head: (b, heads, q_len, k_len).
        energy = torch.einsum("nqhd,nkhd->nhqk", [queries, keys])

        if self.with_mask:
            # Cache the mask on the activations' device.
            self.input_mask = self.input_mask.to(input.device)
            # Disallowed positions get -inf so softmax gives them zero weight.
            energy = energy.masked_fill_(~self.input_mask, -1 * np.inf)

        atte = self.softmax(energy / self.sqrt_d_out)
        ret_atte = atte  # captured before dropout so callers see clean weights
        atte = self.drop_out(atte)
        # Weighted sum of values; heads merged back into embedding_dim.
        out = torch.einsum("nhql,nlhd->nqhd", [atte, values]).reshape(
            b, self.total_sequence_len, self.embedding_dim
        )
        return self.w_o(out),ret_atte


    def get_mask(self):
        """Build the boolean attention mask (True = may attend).

        History tokens attend causally (lower-triangular); the trailing
        ``q_num`` query tokens are blocked from one another except that each
        keeps its own diagonal entry.

        NOTE(review): if q_num == 0, total_q_num is 0 and the `[-0:, -0:]`
        slice covers the WHOLE matrix, zeroing the causal mask — confirm
        callers always use q_num > 0 together with with_mask=True.
        """
        total_q_num = self.total_sequence_len - self.sequence_len
        # Lower-triangular True matrix -> causal attention over history.
        mask = ((torch.triu(torch.ones(self.total_sequence_len, self.total_sequence_len)) == 1)
                .transpose(0, 1)).squeeze(0)

        # Query tokens must not see each other...
        mask[-1 * total_q_num:, -1 * total_q_num:] = False
        for i in range(total_q_num):
            # ...but each query token may attend to itself.
            mask[self.sequence_len + i, self.sequence_len + i] = True
        mask.requires_grad = False
        return mask



class Series_Compression(nn.Module):
    """Compress a long sequence by attending within fixed-size windows and
    mean-pooling each window down to one vector."""

    def __init__(self, sequence_len, embedding_dim, heads):
        super().__init__()
        self.sequence_len = sequence_len
        self.embedding_dim = embedding_dim
        self.mhsa = Self_Attention(embedding_dim, sequence_len, heads)

    def forward(self, ts):
        batch = ts.shape[0]
        total_len = ts.shape[-2]
        n_windows, remainder = divmod(total_len, self.sequence_len)
        assert remainder == 0, f"压缩序列需要被{self.sequence_len}整除"
        # Fold each window into the batch dimension and attend within it.
        windows = ts.reshape(batch * n_windows, self.sequence_len, self.embedding_dim)
        attended, _ = self.mhsa(windows)
        # Pool every window to a single vector, then restore the batch shape.
        pooled = torch.mean(attended, dim=-2)
        return pooled.reshape(batch, n_windows, self.embedding_dim)

class RMSNorm(nn.Module):
    """Root-mean-square layer normalization: no mean-centering, no bias,
    a learnable per-channel scale only."""

    def __init__(self, normalized_shape, eps=1e-8):
        super(RMSNorm, self).__init__()
        self.eps = eps
        # Accept either an int (single trailing dim) or a full shape tuple.
        shape = (normalized_shape,) if isinstance(normalized_shape, int) else normalized_shape
        self.weight = nn.Parameter(torch.ones(shape))

    def forward(self, x):
        # Divide each vector by its RMS over the last dim, then rescale.
        rms = torch.sqrt(x.pow(2).mean(dim=-1, keepdim=True) + self.eps)
        return self.weight * (x / rms)

class MLP(nn.Module):
    """SwiGLU-style gated feed-forward: silu(W1·x) * (W2·x), projected back by W3."""

    def __init__(self, in_feature):
        super().__init__()
        hidden = in_feature * 3
        self.ln1 = nn.Linear(in_feature, hidden, bias=False)  # gate projection
        self.ln2 = nn.Linear(in_feature, hidden, bias=False)  # up projection
        self.activate_fn = nn.SiLU()
        self.ln3 = nn.Linear(hidden, in_feature, bias=False)  # down projection

    def forward(self, x):
        gate = self.activate_fn(self.ln1(x))
        up = self.ln2(x)
        return self.ln3(gate * up)

class Casual_Transformer(nn.Module):
    """Pre-norm causal transformer block: masked multi-head self-attention and
    a gated MLP, each wrapped in an RMSNorm and a residual connection."""

    def __init__(self,
                 embedding_dim,
                 heads,
                 sequence_len,
                 q_num,
                 ):
        """
        :param embedding_dim: model width
        :param heads: number of attention heads
        :param sequence_len: length of the history sequence
        :param q_num: number of extra query tokens appended after the sequence
        """
        super().__init__()
        self.embedding_dim = embedding_dim
        self.heads = heads
        self.sequence_len = sequence_len
        self.q_num = q_num
        self.atten_norm = RMSNorm(embedding_dim)
        self.mlp_norm = RMSNorm(embedding_dim)
        self.mmhsa = Self_Attention(embedding_dim,
                                    sequence_len,
                                    heads,
                                    q_num,
                                    with_mask=True,)
        self.mlp = MLP(embedding_dim)

    def forward(self, behaviors):
        """
        :param behaviors: (b, behavior_num, embedding_dim) tensor, or a tuple
                          whose first element is that tensor — forward returns
                          a tuple itself, so blocks can be chained directly.
        :return: (hidden_state, attention_weights)
        """
        # Accept the (hidden_state, atte) tuple produced by a previous block.
        if isinstance(behaviors, tuple):  # isinstance, not type == tuple
            behaviors = behaviors[0]
        b, behavior_num, embedding_dim = behaviors.shape
        # Pre-norm attention with residual.
        hidden_state, atte = self.mmhsa(self.atten_norm(behaviors))
        hidden_state = behaviors + hidden_state
        # Pre-norm MLP with residual.
        hidden_state = hidden_state + self.mlp(self.mlp_norm(hidden_state))
        return hidden_state, atte



class PositionalEncoding(nn.Module):
    """Classic fixed sinusoidal positional encoding added to the input."""

    def __init__(self, d_model, max_len=500):
        super(PositionalEncoding, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        freqs = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
        angles = positions * freqs
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(angles)  # even channels
        table[:, 1::2] = torch.cos(angles)  # odd channels
        # (1, max_len, d_model): a buffer moves with .to()/.cuda() but is not trained.
        self.register_buffer('pe', table.unsqueeze(0))

    def forward(self, x):
        # Add the first seq_len rows of the table, broadcasting over the batch.
        return x + self.pe[:, :x.size(1)]


class MultiHeadAttention(nn.Module):
    """Standard multi-head self-attention with a fused QKV projection."""

    def __init__(self, embed_dim, num_heads):
        super(MultiHeadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        self.qkv = nn.Linear(embed_dim, embed_dim * 3)
        self.out = nn.Linear(embed_dim, embed_dim)

    def forward(self, x):
        b, s, _ = x.size()

        def split_heads(t):
            # (b, s, e) -> (b, heads, s, head_dim)
            return t.view(b, s, self.num_heads, self.head_dim).transpose(1, 2)

        q_chunk, k_chunk, v_chunk = self.qkv(x).chunk(3, dim=-1)
        q = split_heads(q_chunk)
        k = split_heads(k_chunk)
        v = split_heads(v_chunk)

        scale = 1.0 / math.sqrt(self.head_dim)
        scores = torch.matmul(q, k.transpose(-2, -1)) * scale
        weights = F.softmax(scores, dim=-1)
        # Merge heads back into embed_dim before the output projection.
        merged = torch.matmul(weights, v).transpose(1, 2).contiguous().view(b, s, self.embed_dim)
        return self.out(merged)


class FrequencyAwareAttention(nn.Module):
    """Re-weights each sequence position by a learned softmax score."""

    def __init__(self, embed_dim):
        super(FrequencyAwareAttention, self).__init__()
        self.fc = nn.Linear(embed_dim, 1)

    def forward(self, x):
        # x: (batch_size, seq_len, embed_dim)
        scores = self.fc(x)                     # (batch_size, seq_len, 1)
        weights = torch.softmax(scores, dim=1)  # normalized over the sequence
        return x * weights


class EBMLayer(nn.Module):
    """Stack of attention blocks (MHA -> LayerNorm -> frequency attention ->
    ReLU), followed by mean-pooling over the sequence dimension."""

    def __init__(self, embed_dim, num_heads=4, num_blocks=2):
        super(EBMLayer, self).__init__()

        def make_block():
            # One attention block; the whole pipeline runs as a Sequential.
            return nn.Sequential(
                MultiHeadAttention(embed_dim, num_heads),
                nn.LayerNorm(embed_dim),
                FrequencyAwareAttention(embed_dim),
                nn.ReLU(),
            )

        self.blocks = nn.ModuleList([make_block() for _ in range(num_blocks)])

    def forward(self, x):
        out = x
        for block in self.blocks:
            out = block(out)
        # Collapse the sequence dim to a single pooled vector per batch item.
        return out.mean(dim=1)

class HardNoiseEliminator(nn.Module):
    """Derives a per-batch noise weight from how dissimilar the behavior
    sequence is to the target item in a shared projected space."""

    def __init__(self, embed_dim, temperature=0.1):
        super(HardNoiseEliminator, self).__init__()
        self.temperature = temperature
        self.fc = nn.Linear(embed_dim, embed_dim)

    def forward(self, sequence_emb, target_emb):
        # Project both inputs through the shared linear layer.
        seq_proj = self.fc(sequence_emb)   # (b, s, e)
        tgt_proj = self.fc(target_emb)     # (b, 1, e)

        # Dot-product similarity of every behavior with the target: (b, s).
        sim = torch.matmul(seq_proj, tgt_proj.transpose(1, 2)).squeeze(-1)
        sim = F.softmax(sim / self.temperature, dim=1)
        # Higher mean similarity -> lower noise weight.
        noise = 1 - sim.mean(dim=1)  # (b,)
        return noise.unsqueeze(1)    # (b, 1)


class SoftNoiseFilter(nn.Module):
    """Learns a scalar gate in (0, 1) from a concatenated embedding vector."""

    def __init__(self, embed_dim):
        super(SoftNoiseFilter, self).__init__()
        # NOTE(review): the gate expects embed_dim * 2 inputs, yet the original
        # forward comment claimed (batch, 3 * embed_dim) — confirm with callers.
        self.gate = nn.Sequential(
            nn.Linear(embed_dim * 2, 1),
            nn.Sigmoid()
        )

    def forward(self, combined_emb):
        # combined_emb: (batch_size, 2 * embed_dim) to match the gate's width
        return self.gate(combined_emb)  # (batch_size, 1)
