import math

import numpy as np
import torch
from torch import nn
from utils_func import masked_softmax, top_k_top_p_filtering
from transformers import AutoModel, GPT2LMHeadModel, AutoTokenizer, CpmTokenizer
import torch.nn.functional as F


class Encoder(nn.Module):
    """Base interface for the encoder of an encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def forward(self, X, *args):
        # Subclasses must implement the actual encoding.
        raise NotImplementedError


class Decoder(nn.Module):
    """Base interface for the decoder of an encoder-decoder architecture."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    def init_state(self, enc_outputs, *args):
        # Subclasses derive the initial decoder state from the encoder output.
        raise NotImplementedError

    def forward(self, X, state):
        # Subclasses must implement the actual decoding.
        raise NotImplementedError


class EncoderDecoder(nn.Module):
    """Base class wiring an encoder and a decoder together."""

    def __init__(self, encoder, decoder, **kwargs):
        super().__init__(**kwargs)
        self.encoder = encoder
        self.decoder = decoder

    def forward(self, enc_X, dec_X, *args):
        # Encode the source, derive the decoder's initial state from the
        # encoder output, then decode the target sequence.
        enc_outputs = self.encoder(enc_X, *args)
        return self.decoder(dec_X, self.decoder.init_state(enc_outputs, *args))


class Seq2SeqEncoder(Encoder):
    """GRU encoder for sequence-to-sequence learning."""

    def __init__(self, vocab_size, embed_size, num_hiddens, num_layers,
                 dropout=0, **kwargs):
        super().__init__(**kwargs)
        # Embedding layer mapping token ids to dense vectors.
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size, num_hiddens, num_layers, dropout=dropout)

    def forward(self, X, *args):
        # X: (batch_size, num_steps) -> embedded: (batch_size, num_steps, embed_size)
        embedded = self.embedding(X)
        # The GRU expects time-major input: (num_steps, batch_size, embed_size).
        time_major = embedded.permute(1, 0, 2)
        # With no initial state supplied, the GRU defaults to zeros.
        output, state = self.rnn(time_major)
        # output: (num_steps, batch_size, num_hiddens)
        # state:  (num_layers, batch_size, num_hiddens)
        return output, state


class Seq2SeqDecoder(Decoder):
    """GRU decoder for sequence-to-sequence learning."""

    def __init__(self, vocab_size=30000, embed_size=128, num_hiddens=128, num_layers=2,
                 dropout=0, **kwargs):
        super().__init__(**kwargs)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # The GRU consumes the token embedding concatenated with the context vector.
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers,
                          dropout=dropout)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, *args):
        # Use the encoder's final hidden state as the decoder's initial state.
        return enc_outputs[1]

    def forward(self, X, state):
        # (batch, steps) -> time-major embeddings (steps, batch, embed_size).
        embedded = self.embedding(X).permute(1, 0, 2)
        # Broadcast the encoder's top-layer final state as a per-step context,
        # concatenated to every input step.
        context = state[-1].repeat(embedded.shape[0], 1, 1)
        rnn_input = torch.cat((embedded, context), dim=2)
        output, state = self.rnn(rnn_input, state)
        # Project every step to vocabulary logits and restore batch-major layout.
        # logits: (batch_size, num_steps, vocab_size)
        # state:  (num_layers, batch_size, num_hiddens)
        logits = self.dense(output).permute(1, 0, 2)
        return logits, state


class AttentionDecoder(Decoder):
    """Base interface for decoders equipped with an attention mechanism."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

    @property
    def attention_weights(self):
        # Subclasses expose the attention weights recorded during decoding.
        raise NotImplementedError


class AdditiveAttention(nn.Module):
    """Additive (Bahdanau) attention.

    The score of each (query, key) pair is computed by projecting both into a
    shared hidden space, summing, applying tanh, and mapping to a scalar.
    """

    def __init__(self, key_size, query_size, num_hiddens, dropout, **kwargs):
        super().__init__(**kwargs)
        self.W_k = nn.Linear(key_size, num_hiddens, bias=False)
        self.W_q = nn.Linear(query_size, num_hiddens, bias=False)
        self.w_v = nn.Linear(num_hiddens, 1, bias=False)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens):
        """
        :param queries: (batch_size, num_queries, query_size)
        :param keys: (batch_size, num_kv, key_size)
        :param values: (batch_size, num_kv, value_size)
        :param valid_lens: (batch_size,) valid length of each sequence
        :return: (batch_size, num_queries, value_size)
        """
        q_proj = self.W_q(queries)  # (b, num_queries, num_hiddens)
        k_proj = self.W_k(keys)     # (b, num_kv, num_hiddens)
        # Broadcast-sum every query against every key: (b, nq, nk, hidden).
        combined = torch.tanh(q_proj.unsqueeze(2) + k_proj.unsqueeze(1))
        # w_v has a single output, so the trailing dim collapses to a score:
        # scores: (b, num_queries, num_kv)
        scores = self.w_v(combined).squeeze(-1)
        # Mask out padded key positions before normalising.
        self.attention_weights = masked_softmax(scores, valid_lens)
        # Weighted sum of values via batched matrix multiplication.
        return torch.bmm(self.dropout(self.attention_weights), values)


class DotProductAttention(nn.Module):
    """Scaled dot-product attention.

    Weights are obtained from the dot product of queries and keys, which
    therefore must share the same feature size d.
    """

    def __init__(self, dropout, **kwargs):
        super().__init__(**kwargs)
        self.dropout = nn.Dropout(dropout)

    def forward(self, queries, keys, values, valid_lens=None):
        """
        :param queries: (batch_size, num_queries, d)
        :param keys: (batch_size, num_kv, d)
        :param values: (batch_size, num_kv, value_dim)
        :param valid_lens: (batch_size,) or (batch_size, num_queries)
        """
        feature_dim = queries.shape[-1]
        # Scale by sqrt(d) so score variance stays independent of d;
        # transpose swaps the last two dims of keys for the batched matmul.
        scores = torch.bmm(queries, keys.transpose(1, 2)) / math.sqrt(feature_dim)
        self.attention_weights = masked_softmax(scores, valid_lens)
        return torch.bmm(self.dropout(self.attention_weights), values)


class GxlSelfAttention(nn.Module):
    """Multi-head self-attention with 1x1-convolution QKV projections and an
    internal residual connection (returns x + attention(x))."""

    def __init__(self, embed_dim=252, num_heads=12, dropout=0.1):
        super(GxlSelfAttention, self).__init__()

        # Multi-head configuration; embed_dim must be divisible by num_heads.
        self.num_heads = num_heads
        self.head_dim = embed_dim // num_heads

        # Joint query/key/value projection and output projection, both as
        # kernel-size-1 convolutions over the channel dimension.
        self.c_attn = nn.Conv1d(embed_dim, embed_dim * 3, kernel_size=1, bias=False)
        self.c_proj = nn.Conv1d(embed_dim, embed_dim, kernel_size=1, bias=False)

        # Dropout layers
        self.attn_dropout = nn.Dropout(p=dropout, inplace=False)
        self.resid_dropout = nn.Dropout(p=dropout, inplace=False)

    def forward(self, x, mask=None):
        """
        :param x: (batch_size, seq_length, embed_dim)
        :param mask: optional (batch_size, seq_length); positions where
            mask == 0 are excluded from attention.
        :return: (batch_size, seq_length, embed_dim) — input plus attention output.
        """
        batch_size, seq_length, embed_dim = x.size()

        # Project to query/key/value; the conv expects channels-first input.
        qkv = self.c_attn(x.transpose(1, 2))  # (batch, 3 * embed, seq)
        q, k, v = qkv.chunk(3, dim=1)         # each (batch, embed, seq)

        # Split channels into heads: (batch, num_heads, seq, head_dim).
        q = q.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)
        k = k.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)
        v = v.view(batch_size, self.num_heads, self.head_dim, seq_length).transpose(2, 3)

        # Scaled dot-product scores: (batch, num_heads, seq, seq).
        attn_scores = torch.matmul(q, k.transpose(-2, -1)) / (self.head_dim ** 0.5)
        if mask is not None:
            mask = mask.unsqueeze(1).unsqueeze(2)  # (batch, 1, 1, seq)
            mask = mask.expand_as(attn_scores)     # (batch, heads, seq, seq)
            # Positions where mask == 0 get a large negative score (≈ -inf).
            attn_scores = attn_scores.masked_fill(mask == 0, -1e9)
        attn_probs = nn.functional.softmax(attn_scores, dim=-1)
        attn_probs = self.attn_dropout(attn_probs)

        # Weighted sum of values: (batch, num_heads, seq, head_dim).
        attn_output = torch.matmul(attn_probs, v)

        # BUG FIX: merge heads by inverting the split above. The original
        # viewed the (batch, heads, head_dim, seq) tensor directly as
        # (batch, seq, embed), which scrambles features, and then added a
        # (batch, embed, seq) tensor to the (batch, seq, embed) input — this
        # fails whenever seq_length != embed_dim.
        attn_output = attn_output.transpose(2, 3).contiguous().view(
            batch_size, embed_dim, seq_length)  # (batch, embed, seq)

        # Output projection (channels-first), then back to (batch, seq, embed).
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output).transpose(1, 2)

        # Internal residual connection.
        return x + attn_output


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention mechanism."""

    def __init__(self, attention_dropout=0.0):
        super(ScaledDotProductAttention, self).__init__()
        self.dropout = nn.Dropout(attention_dropout)
        self.softmax = nn.Softmax(dim=2)

    def forward(self, q, k, v, scale=None, attn_mask=None):
        """Forward pass.

        Args:
            q: queries tensor, shape (B, L_q, D_q)
            k: keys tensor, shape (B, L_k, D_k)
            v: values tensor, shape (B, L_v, D_v); typically the same as k
            scale: optional scalar scaling factor
            attn_mask: optional boolean mask of shape (B, L_q, L_k);
                True marks positions to be masked out

        Returns:
            The context tensor and the attention tensor.
        """
        attention = torch.bmm(q, k.transpose(1, 2))
        # BUG FIX: `if scale:` / `if attn_mask:` evaluated tensor truthiness,
        # which raises a RuntimeError for any multi-element mask tensor
        # (and silently skipped scale == 0). Test against None explicitly.
        if scale is not None:
            attention = attention * scale
        if attn_mask is not None:
            # Masked positions are pushed to -inf so softmax zeroes them out.
            attention = attention.masked_fill(attn_mask, float("-inf"))
        attention = self.softmax(attention)
        attention = self.dropout(attention)
        # Weighted sum of the values.
        context = torch.bmm(attention, v)
        return context, attention


class GxlCommonAttention(nn.Module):
    """Multi-head attention with separate K/V/Q linear projections, followed by
    a final linear projection, dropout, residual connection and LayerNorm."""

    def __init__(self, embedding_dim=252, num_heads=12, dropout=0.0):
        super(GxlCommonAttention, self).__init__()

        self.dim_per_head = embedding_dim // num_heads
        self.num_heads = num_heads
        self.linear_k = nn.Linear(embedding_dim, self.dim_per_head * num_heads)
        self.linear_v = nn.Linear(embedding_dim, self.dim_per_head * num_heads)
        self.linear_q = nn.Linear(embedding_dim, self.dim_per_head * num_heads)

        self.linear_final = nn.Linear(embedding_dim, embedding_dim)
        self.dropout = nn.Dropout(dropout)
        # LayerNorm is applied after the residual connection.
        self.layer_norm = nn.LayerNorm(embedding_dim)
        self.dot_product_attention = ScaledDotProductAttention()

    def forward(self, key, value, query, attn_mask=None):
        """
        :param key: (batch_size, L_k, embedding_dim)
        :param value: (batch_size, L_v, embedding_dim)
        :param query: (batch_size, L_q, embedding_dim)
        :param attn_mask: optional (batch_size, L_q, L_k) boolean mask
        :return: (output, attention)
        """
        # Kept for the residual connection.
        residual = query

        dim_per_head = self.dim_per_head
        num_heads = self.num_heads
        batch_size = key.size(0)

        # Linear projections.
        key = self.linear_k(key)
        value = self.linear_v(value)
        query = self.linear_q(query)

        # BUG FIX: split the *feature* dimension into heads. The original
        # view(batch * heads, -1, dim_per_head) sliced the sequence dimension
        # instead, mixing time steps across heads.
        def _split_heads(t):
            # (b, s, h*d) -> (b*h, s, d), batch-major over heads.
            return (t.view(batch_size, -1, num_heads, dim_per_head)
                    .transpose(1, 2)
                    .reshape(batch_size * num_heads, -1, dim_per_head))

        key, value, query = _split_heads(key), _split_heads(value), _split_heads(query)

        if attn_mask is not None:
            # BUG FIX: `if attn_mask:` raised for multi-element tensors, and
            # repeat() tiled head-major while the head split above is
            # batch-major; repeat_interleave keeps masks aligned with batches.
            attn_mask = attn_mask.repeat_interleave(num_heads, dim=0)
        # BUG FIX: scale is 1/sqrt(dim_per_head); the original divided the
        # head dimension by num_heads a second time before the square root.
        scale = dim_per_head ** -0.5
        context, attention = self.dot_product_attention(
            query, key, value, scale, attn_mask)

        # Concatenate heads back: (b*h, s, d) -> (b, s, h*d).
        context = (context.view(batch_size, num_heads, -1, dim_per_head)
                   .transpose(1, 2)
                   .reshape(batch_size, -1, dim_per_head * num_heads))
        # Final linear projection, dropout, residual and LayerNorm.
        output = self.linear_final(context)
        output = self.dropout(output)
        output = self.layer_norm(residual + output)

        return output, attention


class GXLMLP(nn.Module):
    """Position-wise feed-forward network (1x1 convolutions with a 4x channel
    expansion) with an internal residual connection."""

    def __init__(self, embed_dim=252, drop_rate=0.081024):
        super(GXLMLP, self).__init__()

        # Point-wise feedforward network.
        self.c_fc = nn.Conv1d(embed_dim, embed_dim * 4, kernel_size=1, bias=True)
        self.c_proj = nn.Conv1d(embed_dim * 4, embed_dim, kernel_size=1, bias=True)
        self.act = nn.ReLU()
        # Dropout layer.
        self.dropout = nn.Dropout(p=drop_rate, inplace=False)

    def forward(self, x):
        """
        :param x: (batch_size, seq_length, embed_dim)
        :return: (batch_size, seq_length, embed_dim)
        """
        # The convolutions expect channels-first input: (batch, embed, seq).
        h = self.act(self.c_fc(x.transpose(1, 2)))  # (batch, 4 * embed, seq)
        h2 = self.c_proj(h)                          # (batch, embed, seq)
        h2 = self.dropout(h2)
        # BUG FIX: transpose back to (batch, seq, embed) before the residual;
        # the original added a (batch, embed, seq) tensor to the
        # (batch, seq, embed) input, which fails whenever seq_length != embed_dim.
        return x + h2.transpose(1, 2)


# 定义一个函数，根据输入序列的长度生成seq_mask
def generate_seq_mask(seq_length):
    """Build a causal (look-ahead) mask.

    Returns a (seq_length, seq_length) boolean tensor whose strictly
    upper-triangular part is True; True marks positions to be masked.
    """
    positions = torch.arange(seq_length)
    # Entry [i, j] is True exactly when j > i (a future position).
    return positions.unsqueeze(0) > positions.unsqueeze(1)


# 定义一个函数，根据输入序列和填充符号生成pad_mask
def generate_pad_mask(input_seq, pad_token):
    """Mark padding positions in a batch of token ids.

    input_seq: (batch_size, seq_length) tensor of token ids
    pad_token: integer id used as the padding symbol
    Returns an int tensor of the same shape with 1 at padding positions.
    """
    # Boolean comparison, then cast so padding positions become 1.
    return input_seq.eq(pad_token).int()


class SelfAttentionBlock(nn.Module):
    """Pre-LayerNorm transformer block: LN -> self-attention -> LN -> MLP."""

    def __init__(self, embedding_dim=252, head_num=12, dropout=0.081024):
        super(SelfAttentionBlock, self).__init__()
        self.ln_1 = nn.LayerNorm(embedding_dim, eps=1e-05, elementwise_affine=True)
        self.attn = GxlSelfAttention(embedding_dim, head_num)
        self.ln_2 = nn.LayerNorm(embedding_dim, eps=1e-05, elementwise_affine=True)
        self.mlp = GXLMLP(embedding_dim, dropout)

    def forward(self, x):
        # NOTE(review): GxlSelfAttention and GXLMLP each add their own internal
        # residual, so the additions below apply the residual twice — confirm
        # this double residual is intended.
        attended = self.attn(self.ln_1(x))
        x = x + attended
        return x + self.mlp(self.ln_2(x))


class Seq2SeqAttentionEncoder(Encoder):
    """GPT-style self-attention encoder for sequence-to-sequence learning."""

    def __init__(self, vocab_size, head_num=12, embed_size=252, num_layers=2,
                 dropout=0.081024, **kwargs):
        super(Seq2SeqAttentionEncoder, self).__init__(**kwargs)
        # Token embedding.
        self.wte = nn.Embedding(vocab_size, embed_size)
        # Learned positional embedding (maximum sequence length 1024).
        self.wpe = nn.Embedding(1024, embedding_dim=embed_size)
        self.drop = nn.Dropout(p=dropout, inplace=False)
        # BUG FIX: SelfAttentionBlock's first parameter is embedding_dim; the
        # original passed head_num there, silently building head_num-wide blocks.
        self.h = nn.ModuleList(
            [SelfAttentionBlock(embed_size, head_num) for _ in range(num_layers)])
        # BUG FIX: the final LayerNorm must match embed_size (was hard-coded 768).
        self.ln_f = nn.LayerNorm(embed_size, eps=1e-05, elementwise_affine=True)

    def forward(self, input_ids, *args):
        """
        :param input_ids: (batch_size, sequence_length) token ids
        :return: (batch_size, sequence_length, embed_size)
        """
        # Token embeddings: (batch, seq, embed_size).
        wte_output = self.wte(input_ids)
        b, t, c = wte_output.size()
        # Positional encodings, broadcast over the batch: (batch, seq, embed_size).
        wpe_output = self.wpe(torch.arange(t, device=wte_output.device))[None, :, :].expand(b, t, c)

        # Combine token and positional information, then apply dropout.
        hidden_states = self.drop(wte_output + wpe_output)

        # Run the stack of self-attention blocks.
        for block in self.h:
            hidden_states = block(hidden_states)

        # Final LayerNorm.
        return self.ln_f(hidden_states)


class GxlAttentionDecoder(nn.Module):
    """Transformer-style decoder layer: masked self-attention, cross-attention
    over the encoder output, and a position-wise feed-forward network, each
    followed by dropout, a residual connection and LayerNorm."""

    def __init__(self, vocab_size, embed_size=256, num_heads=12, layers=2, dropout=0.0897):
        super(GxlAttentionDecoder, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embed_size)
        # Learned positional embeddings for sequences up to 1024 tokens.
        self.positional_encoding = nn.Embedding(1024, embed_size)
        # Self-Attention
        self.self_attention = GxlSelfAttention(embed_dim=embed_size, num_heads=num_heads, dropout=dropout)
        # Multi-Head Attention with Encoder Output
        self.encoder_attention = nn.MultiheadAttention(embed_dim=embed_size, num_heads=num_heads, dropout=dropout)
        # Feedforward Neural Network
        self.feedforward = GXLMLP(embed_size, dropout)

        self.dropout = nn.Dropout(dropout)
        self.norm1 = nn.LayerNorm(embed_size)
        self.norm2 = nn.LayerNorm(embed_size)
        self.norm3 = nn.LayerNorm(embed_size)

    def forward(self, x, encoder_output, self_attention_mask, encoder_attention_mask):
        # BUG FIX: the positional embedding must be indexed by *positions*,
        # not by token ids (indexing by ids reads arbitrary rows and raises
        # IndexError whenever an id >= 1024).
        positions = torch.arange(x.size(1), device=x.device)
        x = self.embedding(x) + self.positional_encoding(positions)[None, :, :]

        # BUG FIX: GxlSelfAttention.forward(x, mask=...) returns a single
        # tensor (it applies its own internal residual), not an
        # (output, weights) tuple, and its keyword is `mask`, not `attn_mask`.
        # NOTE(review): GxlSelfAttention expects a (batch, seq) padding mask,
        # while callers pass a (seq, seq) causal mask from generate_seq_mask —
        # confirm the intended mask convention.
        self_attention_output = self.self_attention(x, mask=self_attention_mask)
        x = x + self.dropout(self_attention_output)
        x = self.norm1(x)

        # Cross-attention over the encoder output.
        # NOTE(review): nn.MultiheadAttention defaults to batch_first=False
        # (inputs (seq, batch, embed)); the tensors here are batch-first — verify.
        encoder_attention_output, _ = self.encoder_attention(x, encoder_output, encoder_output,
                                                             attn_mask=encoder_attention_mask)
        x = x + self.dropout(encoder_attention_output)
        x = self.norm2(x)

        # Feedforward network with residual and final LayerNorm.
        feedforward_output = self.feedforward(x)
        x = x + self.dropout(feedforward_output)
        x = self.norm3(x)
        return x


class AttentionModel(nn.Module):
    """Encoder-decoder model pairing the self-attention encoder and decoder."""

    def __init__(self, vocab_size, embed_size=256, heads_num=12, layers=2, dropout=0.0897):
        super(AttentionModel, self).__init__()
        # BUG FIX: Seq2SeqAttentionEncoder's signature is
        # (vocab_size, head_num, embed_size, num_layers, dropout); passing
        # embed_size positionally bound it to head_num, and the subsequent
        # head_num keyword raised "got multiple values for argument".
        # Bind everything by keyword instead.
        self.encoder = Seq2SeqAttentionEncoder(vocab_size, head_num=heads_num,
                                               embed_size=embed_size,
                                               num_layers=layers, dropout=dropout)
        self.decoder = GxlAttentionDecoder(vocab_size, embed_size, num_heads=heads_num,
                                           layers=layers, dropout=dropout)

    def forward(self, enc_inputs, dec_inputs):
        """
        enc_inputs: (batch_size, seq_length1)
        dec_inputs: (batch_size, seq_length2)
        """
        # NOTE(review): -2 is used as the padding token id here — confirm it
        # matches the tokenizer actually used.
        padding_mask = generate_pad_mask(enc_inputs, -2)
        enc_outputs = self.encoder(enc_inputs)  # (batch, seq, embed_size)
        # Causal mask so each target position only attends to earlier positions.
        self_attention_mask = generate_seq_mask(dec_inputs.size(1))
        dec_outputs = self.decoder(dec_inputs, enc_outputs,
                                   self_attention_mask=self_attention_mask,
                                   encoder_attention_mask=padding_mask)
        return dec_outputs


class Seq2SeqAttentionDecoder(AttentionDecoder):
    """GRU decoder with additive attention over the encoder outputs."""

    def __init__(self, vocab_size, embed_size=252, num_hiddens=252, num_layers=2,
                 dropout=0.08976, **kwargs):
        super(Seq2SeqAttentionDecoder, self).__init__(**kwargs)
        self.attention = AdditiveAttention(num_hiddens, num_hiddens, num_hiddens, dropout)
        self.embedding = nn.Embedding(vocab_size, embed_size)
        self.rnn = nn.GRU(embed_size + num_hiddens, num_hiddens, num_layers, dropout=dropout)
        self.dense = nn.Linear(num_hiddens, vocab_size)

    def init_state(self, enc_outputs, enc_valid_lens, *args):
        """Build the decoder state from the encoder result.

        enc_outputs is the (outputs, hidden_state) pair returned by the
        encoder: outputs has shape (num_steps, batch_size, num_hiddens) and
        hidden_state has shape (num_layers, batch_size, num_hiddens).
        """
        # BUG FIX: this method returned None (its real body was commented
        # out), so forward() crashed when unpacking the state triple.
        # Restore the documented implementation: make the encoder outputs
        # batch-major and carry the hidden state and valid lengths along.
        outputs, hidden_state = enc_outputs
        return (outputs.permute(1, 0, 2), hidden_state, enc_valid_lens)

    def forward(self, X, state):
        # enc_outputs: (batch_size, num_steps, num_hiddens)
        # hidden_state: (num_layers, batch_size, num_hiddens)
        enc_outputs, hidden_state, enc_valid_lens = state
        # X becomes time-major: (num_steps, batch_size, embed_size).
        X = self.embedding(X).permute(1, 0, 2)
        outputs, self._attention_weights = [], []
        # Decode one step at a time so each step can attend with the freshest state.
        for x in X:
            # query: (batch_size, 1, num_hiddens) — the top-layer hidden state.
            query = torch.unsqueeze(hidden_state[-1], dim=1)
            # context: (batch_size, 1, num_hiddens).
            context = self.attention(
                query, enc_outputs, enc_outputs, enc_valid_lens)
            # Concatenate context and embedding along the feature dimension.
            x = torch.cat((context, torch.unsqueeze(x, dim=1)), dim=-1)
            # Reshape to (1, batch_size, embed_size + num_hiddens) for the GRU.
            out, hidden_state = self.rnn(x.permute(1, 0, 2), hidden_state.contiguous())
            outputs.append(out)
            self._attention_weights.append(self.attention.attention_weights)
        # After the dense projection, outputs: (num_steps, batch_size, vocab_size);
        # returned batch-major as (batch_size, num_steps, vocab_size).
        outputs = self.dense(torch.cat(outputs, dim=0))
        return outputs.permute(1, 0, 2), [enc_outputs, hidden_state, enc_valid_lens]


class PreTrainSeq2seqEncoder(Encoder):
    """Frozen pretrained GPT-2 transformer followed by a trainable linear
    projection from 768 to 128 features."""

    def __init__(self):
        super(PreTrainSeq2seqEncoder, self).__init__()
        pretrained, _ = get_chat_model_tokenizer()
        self.transformer = pretrained.transformer
        # Freeze the pretrained transformer; only the projection below trains.
        for p in self.transformer.parameters():
            p.requires_grad = False
        self.dense = nn.Linear(768, 128)

    def forward(self, X, *args):
        # X: (batch, seq) token ids -> projected features (batch, seq, 128).
        hidden = self.dense(self.transformer(X).last_hidden_state)
        # Duplicate the final step's features to act as a 2-layer initial state.
        last_step = hidden[:, -1:, :]                 # (batch, 1, 128)
        state = torch.cat((last_step, last_step), dim=1)  # (batch, 2, 128)
        # Return time-major outputs and a (num_layers, batch, hidden) state.
        return hidden.permute(1, 0, 2), state.permute(1, 0, 2)


# NOTE(review): the pretrained GPT-2 and the CPM tokenizer are loaded at import
# time from hard-coded relative paths, which makes this module slow to import
# and breaks when run from a different working directory — consider lazy loading.
model = GPT2LMHeadModel.from_pretrained("../../../smart_speaker/chatbot_pytorch_2/model/novel/epoch50")
tokenizer = CpmTokenizer('../../../smart_speaker/chatbot_pytorch_2/vocab/chinese_vocab.model')


def get_chat_model_tokenizer():
    """Return the module-level pretrained model and tokenizer pair."""
    return model, tokenizer


def get_big_model():
    """Assemble the frozen-GPT-2 encoder + attention decoder seq2seq model.

    Returns an EncoderDecoder whose encoder is PreTrainSeq2seqEncoder and
    whose decoder is a Seq2SeqAttentionDecoder sized to match it.
    """
    encoder = PreTrainSeq2seqEncoder()
    # BUG FIX: Seq2SeqAttentionDecoder has no default vocab_size, so the bare
    # Seq2SeqAttentionDecoder() call raised TypeError. Size the decoder to the
    # tokenizer's vocabulary and to the 128-dim features the encoder emits
    # (its state is (2, batch, 128), so num_hiddens must be 128).
    # NOTE(review): confirm len(tokenizer) matches the vocabulary the decoder
    # should predict over.
    decoder = Seq2SeqAttentionDecoder(vocab_size=len(tokenizer),
                                      embed_size=128, num_hiddens=128)
    return EncoderDecoder(encoder, decoder)
