#!/usr/bin/env python
# coding: utf-8

# In[ ]:


import math
from typing import Optional, List
import torch
from torch import nn
from labml import tracker
import numpy as np
from labml_helpers.module import Module

from labml_nn.utils import clone_module_list

# Bug fix: removed a stray `logging.info("asdasd")` debug call — `logging` was
# never imported, so the line raised NameError on import.

# Toggle for the quick smoke tests in the `__main__` sections below.
run_test = True

torch.set_printoptions(precision=3, linewidth=500)


# MHA实现细节
# * Q和K的长度不一定一样，KV长度一定是一样的

# In[ ]:


from os import name
import re


class MultiHeadAttention(nn.Module):
    """Multi-head attention over (seq_len, batch, d_model) tensors.

    Query and key sequence lengths may differ; key and value lengths must match.
    """

    def __init__(self, heads: int, d_model: int, dropout_prob: float = 0.1, bias: bool = True):
        """
        Args:
            heads: number of attention heads; must divide d_model evenly.
            d_model: model dimension.
            dropout_prob: dropout applied to the attention weights.
            bias: whether the q/k/v projections carry a bias term.
        """
        super().__init__()
        self.d_k = d_model // heads
        self.heads = heads

        self.q_proj = nn.Linear(d_model, d_model, bias=bias)
        self.k_proj = nn.Linear(d_model, d_model, bias=bias)
        self.v_proj = nn.Linear(d_model, d_model, bias=bias)
        self.output = nn.Linear(d_model, d_model)

        # Scores have shape (seq_len_q, seq_len_k, batch, heads), so dim=1
        # normalises over the key positions.
        self.softmax = nn.Softmax(dim=1)
        self.dropout = nn.Dropout(dropout_prob)

        # 1/sqrt(d_k) scaling keeps pre-softmax score variance stable.
        self.scale = 1 / math.sqrt(self.d_k)

        # Detached attention weights from the latest forward pass, for inspection.
        self.attn = None

    def get_score(self, query: torch.Tensor, key: torch.Tensor):
        """
        Q shape: (seq_len_q, batch_size, num_heads, head_dim)
        K shape: (seq_len_k, batch_size, num_heads, head_dim)
        score shape: (seq_len_q, seq_len_k, batch_size, num_heads)
        """
        return torch.einsum('qbhd,kbhd->qkbh', query, key)

    def validate_mask(self, mask: torch.Tensor, query_shape: List[int], key_shape: List[int]):
        """Validate the mask and broadcast it to score rank.

        Args:
            mask: attention mask, shape (seq_len_q, seq_len_k[, batch_size]);
                leading dims of size 1 broadcast.
            query_shape: shape of the query tensor.
            key_shape: shape of the key tensor.

        Returns:
            A view of the mask with trailing singleton dims appended so it
            broadcasts against scores of shape (q, k, batch, heads).
        """
        assert mask.dim() >= 2
        assert mask.shape[0] == 1 or mask.shape[0] == query_shape[0]
        assert mask.shape[1] == key_shape[0]
        if mask.dim() >= 3:
            assert mask.shape[2] == 1 or mask.shape[2] == query_shape[1]
        # Bug fix: the original used in-place `mask.unsqueeze_(-1)`, silently
        # mutating the caller's tensor; use the out-of-place version instead.
        while mask.dim() < len(query_shape) + 1:
            mask = mask.unsqueeze(-1)
        return mask

    def split_head_(self, x: torch.Tensor):
        """Reshape the trailing d_model dim into (heads, d_k)."""
        head_shape = x.shape[:-1]
        x = x.view(*head_shape, self.heads, self.d_k)
        return x

    def forward(self, *, query: torch.Tensor, key: torch.Tensor, value: torch.Tensor, mask: Optional[torch.Tensor] = None):
        """
        Args:
            query: (seq_len_q, batch, d_model)
            key: (seq_len_k, batch, d_model)
            value: (seq_len_k, batch, d_model)
            mask: optional mask; positions where mask == 0 are excluded.

        Returns:
            (seq_len_q, batch, d_model) attention output.
        """
        seq_len_q, batch_size, _ = query.shape

        if mask is not None:
            mask = self.validate_mask(mask, query.shape, key.shape)

        query = self.split_head_(self.q_proj(query))
        key = self.split_head_(self.k_proj(key))
        value = self.split_head_(self.v_proj(value))

        # (seq_len_q, seq_len_k, batch_size, num_heads)
        scores = self.get_score(query, key)*self.scale
        if mask is not None:
            # -inf so masked positions get exactly zero weight after softmax.
            scores = scores.masked_fill(mask == 0, float('-inf'))

        attn = self.softmax(scores)
        attn = self.dropout(attn)

        x = torch.einsum("qkbh,kbhd->qbhd", attn, value)
        self.attn = attn.detach()
        x = x.reshape(seq_len_q, batch_size, -1)
        return self.output(x)


if __name__ == '__main__':
    # Smoke test: self-attention with a causal (lower-triangular) mask.
    n_heads, model_dim, q_len, batch_size = 4, 64, 5, 2
    attention = MultiHeadAttention(heads=n_heads, d_model=model_dim)

    query = torch.randn((q_len, batch_size, model_dim))
    key = torch.randn((q_len, batch_size, model_dim))
    value = torch.randn((q_len, batch_size, model_dim))

    causal_mask = torch.tril(torch.ones((q_len, q_len)))
    print(attention(query=query, key=key, value=value, mask=causal_mask).shape)


# 位置编码Positional Encoding
# 
# register_buffer将张量注册为模型的缓冲区，它不会被优化器更新，可以选择是否成为state_dict。这一方法显然也会注册一个成员变量。

# In[ ]:


def get_positional_encoding(d_model: int, max_len: int = 5000) -> torch.Tensor:
    """Precompute the sinusoidal positional-encoding table.

    PE[pos, 2i]   = sin(pos / 10000^(2i/d_model))
    PE[pos, 2i+1] = cos(pos / 10000^(2i/d_model))

    Args:
        d_model: embedding dimension (assumed even).
        max_len: number of positions to precompute.

    Returns:
        A constant tensor of shape (max_len, 1, d_model) with gradients disabled;
        the singleton middle dim broadcasts over the batch axis.
    """
    encodings = torch.zeros(max_len, d_model)
    pos = torch.arange(max_len).unsqueeze(1)
    two_i = torch.arange(0, d_model, 2, dtype=torch.float32)
    inv_freq = torch.exp(two_i * (-math.log(10000.0) / d_model))
    angles = pos * inv_freq
    encodings[:, 0::2] = torch.sin(angles)
    encodings[:, 1::2] = torch.cos(angles)
    return encodings.unsqueeze(1).requires_grad_(False)


class PositionalEncoding(nn.Module):
    """Adds fixed sinusoidal positional encodings to the input, then dropout."""

    def __init__(self, d_model: int, dropout_prob: float, max_len: int = 5000):
        super().__init__()
        self.dropout = nn.Dropout(dropout_prob)
        # Non-persistent buffer: moves with the module (device/dtype) but is
        # not written to the state_dict.
        self.register_buffer('positional_encodings',
                             get_positional_encoding(d_model, max_len), False)

    def forward(self, x: torch.Tensor):
        # x: (seq_len, batch, d_model); slice the table to the sequence length.
        seq_len = x.shape[0]
        encodings = self.positional_encodings[:seq_len].detach().requires_grad_(False)
        return self.dropout(x + encodings)


# Position-wise Feed-Forward Network (FFN)
# 
# 门控单元GLU被认为能够改善Transformer，它将一个门控的线性层结果与激活后的线性层结果相乘作为放大线性层的输入

# In[ ]:


class FeedForward(Module):
    """Position-wise feed-forward network, optionally gated (GLU-style).

    The gated variant multiplies the activated expansion by a linear gate of
    the input before the contraction layer.
    """

    def __init__(self, d_model: int, d_ff: int,
                 dropout: float = 0.1,
                 activation=nn.GELU(),
                 is_gated: bool = False,
                 bias1: bool = True,
                 bias2: bool = True,
                 bias_gate: bool = True):
        """
        Args:
            d_model: model (input/output) dimension.
            d_ff: hidden dimension of the expansion layer.
            dropout: dropout probability applied before the contraction.
            activation: activation module applied after the expansion.
            is_gated: if True, use a gated linear unit on the hidden path.
            bias1: bias flag for the expansion layer.
            bias2: bias flag for the contraction layer.
            bias_gate: bias flag for the gate layer (gated variant only).
        """
        super().__init__()
        self.linear_1 = nn.Linear(d_model, d_ff, bias1)
        self.linear_2 = nn.Linear(d_ff, d_model, bias2)
        self.dropout = nn.Dropout(dropout)
        self.activation = activation
        self.is_gated = is_gated
        if is_gated:
            self.gate = nn.Linear(d_model, d_ff, bias_gate)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        hidden = self.activation(self.linear_1(x))
        if self.is_gated:
            hidden = hidden * self.gate(x)
        return self.linear_2(self.dropout(hidden))


# Encoder Layer & Decoder Layer
# 
# * Embedding的输入必须是[0,n_vocab-1]的整数，它对应了一个词汇，一个词汇对应了一个可学习的嵌入向量
# * 在计算embedding时乘以`math.sqrt(self.d_model)`,可以让嵌入向量的方差与维度成正比，提高数值稳定性
# * 可学习PE可以提高灵活性，但是增加了参数量，且效果依赖于数据，适用于特化，复杂，大规模的模型，GPT系列就使用了可学习编码

# In[ ]:


class EmbeddingWithPE(nn.Module):
    """Token embedding scaled by sqrt(d_model) plus fixed sinusoidal encodings."""

    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Bug fix: the buffer was registered as "positional_encoding" but read
        # as self.positional_encodings in forward(), raising AttributeError at
        # runtime. The names now match.
        self.register_buffer("positional_encodings",
                             get_positional_encoding(d_model, max_len))

    def forward(self, x: torch.Tensor):
        # x: (seq_len, batch) of token ids in [0, n_vocab).
        pe = self.positional_encodings[:x.shape[0]].requires_grad_(False)
        # Scaling by sqrt(d_model) keeps the embedding magnitude comparable to
        # the positional encodings, improving numerical stability.
        return self.linear(x)*math.sqrt(self.d_model)+pe


class EmbeddingWithLearnedPE(nn.Module):
    """Token embedding scaled by sqrt(d_model) plus a learned positional table."""

    def __init__(self, d_model: int, n_vocab: int, max_len: int = 5000):
        super().__init__()
        self.linear = nn.Embedding(n_vocab, d_model)
        self.d_model = d_model
        # Learned encodings: zero-initialised parameter updated by the optimiser.
        self.positional_encodings = nn.Parameter(
            torch.zeros(max_len, 1, d_model), requires_grad=True)

    def forward(self, x: torch.Tensor):
        seq_len = x.shape[0]
        scaled = self.linear(x) * math.sqrt(self.d_model)
        return scaled + self.positional_encodings[:seq_len]


# In[ ]:


class TransformerLayer(nn.Module):
    def __init__(self, *,
                 d_model: int,
                 self_attn: MultiHeadAttention,
                 src_attn: MultiHeadAttention = None,
                 feed_forward: FeedForward,
                 dropout_prob: float):
        """这里的MHA没有使用positional encoding，需要被重载
        """
        super().__init__()
        self.size = d_model
        self.self_attn = self_attn
        self.src_attn = src_attn
        self.feed_forward = feed_forward
        self.dropout = nn.Dropout(dropout_prob)
        self.norm_self_attn = nn.RMSNorm([d_model])
        if self.src_attn is not None:
            self.norm_src_attn = nn.RMSNorm([d_model])
        self.norm_ff = nn.RMSNorm([d_model])
        self.is_save_ff_input = False

    def forward(self, *, x: torch.Tensor, mask: torch.Tensor,
                src: torch.Tensor = None,
                src_mask: torch.Tensor = None):
        """_summary_

        Args:
            x (torch.Tensor): _description_
            mask (torch.Tensor): _description_
            src (torch.Tensor): _description_
            src_mask (torch.Tensor): _description_
        """

        z = self.norm_self_attn(x)
        self_attn = self.self_attn(query=z, key=z, value=z, mask=mask)
        x = x+self.dropout(self_attn)
        # If a source is provided, get results from attention to source. This is when you have a decoder layer that pays attention to encoder outputs
        if src is not None:
            z = self.norm_src_attn(z)
            src_attn = self.src_attn(
                query=z, key=src, value=src, mask=src_mask)
            x = x + self.dropout(src_attn)

        # after attn, use ffn
        z = self.norm_ff(x)
        if self.is_save_ff_input:
            self.ff_input = z.clone()
        ff = self.feed_forward(z)
        x = x+self.dropout(ff)

        return x



# In[ ]:


class Encoder(nn.Module):
    """Stack of identical encoder layers followed by a final RMSNorm."""

    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Deep copies of the prototype layer, one per depth.
        self.layers = clone_module_list(layer, n_layers)
        self.norm = nn.RMSNorm([layer.size])

    def forward(self, x: torch.Tensor, mask: torch.Tensor):
        """Run the input through every layer, then normalise the result."""
        for encoder_layer in self.layers:
            x = encoder_layer(x=x, mask=mask)
        return self.norm(x)

class Decoder(nn.Module):
    """Stack of identical decoder layers followed by a final RMSNorm."""

    def __init__(self, layer: TransformerLayer, n_layers: int):
        super().__init__()
        # Deep copies of the prototype layer, one per depth.
        self.layers = clone_module_list(layer, n_layers)
        self.norm = nn.RMSNorm([layer.size])

    def forward(self, x: torch.Tensor, memory: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        """Run the target through every layer, cross-attending to `memory`."""
        for decoder_layer in self.layers:
            x = decoder_layer(x=x, mask=tgt_mask, src=memory, src_mask=src_mask)
        return self.norm(x)


# In[ ]:


class Generator(nn.Module):
    """Projects token embeddings to vocabulary logits.

    Returns raw logits; apply softmax / log-softmax externally (e.g. in the loss).
    """

    def __init__(self, n_vocab: int, d_model: int):
        super().__init__()
        self.projection = nn.Linear(d_model, n_vocab)

    def forward(self, x):
        logits = self.projection(x)
        return logits



# In[ ]:


class EncoderDecoder(nn.Module):
    """Full encoder-decoder transformer: embeddings, two stacks, and a generator."""

    def __init__(self, encoder: Encoder,  decoder: Decoder,
                 src_embed: nn.Module, tgt_embed: nn.Module,
                 generator: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

        # Glorot initialisation for every weight matrix; dim > 1 skips biases.
        for param in self.parameters():
            if param.dim() > 1:
                nn.init.xavier_uniform_(param)

    def encode(self, src: torch.Tensor, src_mask: torch.Tensor):
        """Embed the source sequence and run the encoder stack."""
        return self.encoder(self.src_embed(src), src_mask)

    def decode(self,  memory: torch.Tensor, src_mask: torch.Tensor, tgt: torch.Tensor, tgt_mask: torch.Tensor):
        """Embed the target sequence and run the decoder against encoder memory."""
        return self.decoder(self.tgt_embed(tgt), memory, src_mask, tgt_mask)

    def forward(self, src: torch.Tensor, tgt: torch.Tensor, src_mask: torch.Tensor, tgt_mask: torch.Tensor):
        memory = self.encode(src, src_mask)
        return self.decode(memory, src_mask, tgt, tgt_mask)

