import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import math
import copy
import xlwings as xw
import pandas as pd
from rich import print


class Embeddings(nn.Module):
    """Token-id -> dense-vector lookup table.

    Args:
        num: vocabulary size (number of distinct token ids).
        dim: embedding dimension.
    """
    def __init__(self, num, dim):
        super(Embeddings, self).__init__()
        self.num = num
        self.dim = dim
        # plain learned lookup; no sqrt(dim) scaling is applied here
        self.embedding = nn.Embedding(num_embeddings=num, embedding_dim=dim)

    def forward(self, x):
        """Map integer ids of shape (..., seq) to vectors of shape (..., seq, dim)."""
        return self.embedding(x)
    
class PositionalEncoding(nn.Module):
    """Adds the sinusoidal positional encoding of Vaswani et al. (2017).

    PE(pos, 2i)   = sin(pos / 10000^(2i/dim))
    PE(pos, 2i+1) = cos(pos / 10000^(2i/dim))

    Args:
        dim: model/embedding dimension (assumed even).
        max_len: maximum sequence length supported.
        dp: dropout probability applied after adding the encoding.
    """
    def __init__(self, dim, max_len=5000, dp=0.1):
        super(PositionalEncoding, self).__init__()
        self.dropout = nn.Dropout(dp)
        self.dim = dim
        self.max_len = max_len
        pe = torch.zeros(max_len, dim)
        self.position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # BUG FIX: the frequency term must be exp(2i * -ln(10000)/dim);
        # the old code used `arange(0,dim,2) * -ln(1000)` (no exp, no /dim,
        # wrong base), producing sin/cos of huge arguments.
        self.sub_dim = torch.exp(
            torch.arange(0, dim, 2).float() * -(math.log(10000.0) / dim)
        )
        pe[:, 0::2] = torch.sin(self.position * self.sub_dim)
        pe[:, 1::2] = torch.cos(self.position * self.sub_dim)

        # buffer (not a parameter): moves with .to(device), saved in state_dict
        self.register_buffer('pe', pe.unsqueeze(0))

    def forward(self, x):
        """Add positional encoding to x of shape (batch, seq, dim) and apply dropout."""
        d_size = x.size(1)
        pe = self.pe[:, :d_size, :]
        x = x + pe
        return self.dropout(x)

def cloneModel(model, num=4):
    """Return an nn.ModuleList holding `num` independent deep copies of `model`."""
    copies = []
    for _ in range(num):
        copies.append(copy.deepcopy(model))
    return nn.ModuleList(copies)


def Attention(q, k, v, mask=None, dropout=None):
    """Scaled dot-product attention.

    Args:
        q, k, v: query/key/value tensors; last dim is the feature size.
        mask: optional tensor broadcastable to the score matrix; positions
            where mask == 0 are blocked (set to -1e9 before softmax).
        dropout: optional dropout module applied to the attention weights.

    Returns:
        (output, attention_weights)
    """
    scale = math.sqrt(q.size(-1))
    scores = torch.matmul(q, k.transpose(-2, -1)) / scale
    if mask is not None:
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    return torch.matmul(weights, v), weights
    
    
class MultiHeaderAttention(nn.Module):
    """Multi-head attention: project q/k/v, attend per head in parallel,
    concatenate the heads, then apply the final output projection.

    Args:
        dim: model dimension (e.g. 512); assumed divisible by num_head.
        num_head: number of attention heads (e.g. 8).
        dp: dropout probability applied to the attention weights.
    """
    def __init__(self, dim, num_head, dp=0.1):
        super(MultiHeaderAttention, self).__init__()
        self.dim = dim              # e.g. 512
        self.num_head = num_head    # e.g. 8
        self.dp = dp
        self.dim_head = dim // num_head  # per-head size, e.g. 512/8 = 64
        # 4 linears: q-, k-, v-projections plus the final output projection
        init_linear = nn.Linear(dim, dim)
        self.linears = cloneModel(init_linear, 4)
        self.dropout = nn.Dropout(dp)

    def forward(self, q, k, v, mask=None):
        """q/k/v: (batch, seq, dim). Returns (batch, seq, dim)."""
        batch_size = q.size(0)
        # project, then reshape to (batch, heads, seq, dim_head)
        q, k, v = [linear(x).view(batch_size, -1, self.num_head, self.dim_head).transpose(1, 2)
                   for linear, x in zip(self.linears, (q, k, v))]
        x, p_atten = Attention(q, k, v, mask=mask, dropout=self.dropout)
        # merge heads back to (batch, seq, dim)
        y = x.transpose(1, 2).contiguous().view(batch_size, -1, self.num_head * self.dim_head)
        # BUG FIX: apply the 4th (output) projection — it was created by
        # cloneModel(..., 4) but never used, so the concat was returned raw.
        return self.linears[-1](y)

class PositionWiseFeedForward(nn.Module):
    """Position-wise two-layer MLP: Linear -> ReLU -> Dropout -> Linear.

    Args:
        dim: input/output dimension.
        middle_dim: hidden dimension of the first linear layer.
        dp: dropout probability applied after the ReLU.
    """
    def __init__(self, dim, middle_dim, dp=0):
        super().__init__()
        self.n1 = nn.Linear(dim, middle_dim)
        self.n2 = nn.Linear(middle_dim, dim)
        self.drop = nn.Dropout(dp)

    def forward(self, x):
        hidden = F.relu(self.n1(x))
        hidden = self.drop(hidden)
        return self.n2(hidden)

class NormalizationLayer(nn.Module):
    """Layer normalization over the last dimension with learnable scale/shift.

    Uses the (std + eps) variant rather than sqrt(var + eps).
    """
    def __init__(self, dim):
        super().__init__()
        # parameter names kept as-is ('zeors' typo included) so existing
        # state_dicts still load
        self.zeors = nn.Parameter(torch.zeros(dim))  # shift (beta)
        self.ones = nn.Parameter(torch.ones(dim))    # scale (gamma)
        self.eps = 1e-9

    def forward(self, x: torch.Tensor):
        mu = x.mean(dim=-1, keepdim=True)
        sigma = x.std(dim=-1, keepdim=True)  # unbiased std, matching torch default
        normalized = (x - mu) / (sigma + self.eps)
        return self.ones * normalized + self.zeors


class Sublayer(nn.Module):
    """Pre-norm residual wrapper: returns x + dropout(sublayer(norm(x))).

    `sublayer` is any callable taking and returning a tensor of shape (..., dim).
    """
    def __init__(self, dim, dp=0.1):
        super().__init__()
        self.norm = NormalizationLayer(dim)
        self.drop = nn.Dropout(dp)

    def forward(self, x, sublayer):
        normed = self.norm(x)
        return x + self.drop(sublayer(normed))


class EncoderLayer(nn.Module):
    """One encoder layer: self-attention sublayer then feed-forward sublayer,
    each wrapped in a residual connection with pre-normalization."""
    def __init__(self, attention, feed_ward, dim, dp=0.1):
        super(EncoderLayer, self).__init__()
        self.attention = attention
        self.feed_ward = feed_ward
        # two independent residual wrappers (deep copies via cloneModel)
        self.sublayers = cloneModel(Sublayer(dim=dim, dp=dp), num=2)

    def forward(self, x, mask=None):
        # self-attention (q = k = v = x), then position-wise feed-forward
        attn_out = self.sublayers[0](x, lambda t: self.attention(t, t, t, mask))
        return self.sublayers[1](attn_out, self.feed_ward)


class Encoder(nn.Module):
    """Stack of n identical encoder layers followed by a final normalization."""
    def __init__(self, layer, n=6, dim=512):
        super(Encoder, self).__init__()
        self.layers = cloneModel(layer, n)
        self.norm = NormalizationLayer(dim)

    def forward(self, x, mask=None):
        out = x
        for enc_layer in self.layers:
            out = enc_layer(out, mask=mask)
        return self.norm(out)
        

# 解码器
class DecoderLayer(nn.Module):
    """One decoder layer: masked self-attention, cross-attention over the
    encoder memory, then a feed-forward sublayer — each with residual + norm."""
    def __init__(self, attention, feed_ward, dim, dp=0.1):
        super().__init__()
        self.attention = attention
        self.feed_ward = feed_ward
        self.sublayers = cloneModel(Sublayer(dim, dp=dp), num=3)

    def forward(self, x, memory, first_mask, last_mask):
        '''
        first_mask: mask for the decoder self-attention, intended so each
            position only depends on itself and earlier positions.
            Original example mask:
                [[0., 1., 1.],
                 [0., 0., 1.],
                 [0., 0., 0.]]
            NOTE(review): Attention blocks positions where mask == 0, so this
            example would block the *lower* triangle — verify the mask
            polarity against the caller before relying on it.
        last_mask: mask applied to the cross-attention over `memory`.
        '''
        x = self.sublayers[0](x, lambda t: self.attention(t, t, t, mask=first_mask))
        # cross attention: queries from the decoder, keys/values from memory
        x = self.sublayers[1](x, lambda t: self.attention(t, memory, memory, mask=last_mask))
        return self.sublayers[2](x, self.feed_ward)

class Decoder(nn.Module):
    """Stack of n identical decoder layers followed by a final normalization."""
    def __init__(self, layer, n=4, dim=512):
        super(Decoder, self).__init__()
        self.layers = cloneModel(layer, n)
        self.norm = NormalizationLayer(dim)

    def forward(self, x, memory, first_mask, last_mask):
        out = x
        for dec_layer in self.layers:
            out = dec_layer(out, memory, first_mask, last_mask)
        return self.norm(out)
        

class Gennaration(nn.Module):
    """Output head: projects decoder states to per-token log-probabilities.

    (Class name typo is kept — it is part of the public interface.)
    """
    def __init__(self, vocab_size, dim=512):
        super().__init__()
        self.l = nn.Linear(dim, vocab_size)

    def forward(self, x):
        """(..., dim) -> (..., vocab_size) log-probabilities."""
        logits = self.l(x)
        return F.log_softmax(logits, dim=-1)
        


class EncoderDecoder(nn.Module):
    """Full encoder-decoder model: embed source, encode, embed target,
    decode against the encoder memory, then project with the generator."""
    def __init__(self, encoder, decoder, src_embed, tgt_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder
        self.decoder = decoder
        self.src_embed = src_embed
        self.tgt_embed = tgt_embed
        self.generator = generator

    def forward(self, x_input, x_mask, y_input, first_mask, last_mask):
        memory = self.encode(x_input, x_mask)
        decoded = self.decode(y_input, first_mask, last_mask, memory)
        return self.generator(decoded)

    def encode(self, x_input, x_mask):
        """Embed the source sequence and run the encoder."""
        return self.encoder(self.src_embed(x_input), x_mask)

    def decode(self, y_input, first_mask, last_mask, input_res):
        """Embed the target sequence and run the decoder over `input_res` (memory)."""
        return self.decoder(self.tgt_embed(y_input), input_res, first_mask, last_mask)


def make_model(src_vocab, tgt_vocab, N=6, dim=512, h=8, middle_dim=1024):
    """Assemble a complete encoder-decoder transformer.

    Args:
        src_vocab: source vocabulary size.
        tgt_vocab: target vocabulary size.
        N: number of encoder layers and of decoder layers.
        dim: model dimension.
        h: number of attention heads.
        middle_dim: hidden size of the feed-forward sublayers.

    Returns:
        An EncoderDecoder module.
    """
    deep = copy.deepcopy
    src_embed = nn.Embedding(src_vocab, dim)
    tgt_embed = nn.Embedding(tgt_vocab, dim)
    src_position = PositionalEncoding(dim=dim)
    tgt_position = deep(src_position)

    # one template of each component; cloneModel inside Encoder/Decoder
    # deep-copies per layer, so no parameters are shared across layers
    attention = MultiHeaderAttention(dim=dim, num_head=h)
    feed_ward = PositionWiseFeedForward(dim=dim, middle_dim=middle_dim)
    encoder = Encoder(EncoderLayer(deep(attention), deep(feed_ward), dim=dim), n=N, dim=dim)
    decoder = Decoder(DecoderLayer(deep(attention), deep(feed_ward), dim=dim), n=N, dim=dim)

    return EncoderDecoder(
        encoder,
        decoder,
        nn.Sequential(src_embed, src_position),
        nn.Sequential(tgt_embed, tgt_position),
        Gennaration(vocab_size=tgt_vocab, dim=dim),
    )
    
    

 


if __name__ == '__main__':
    
    # End-to-end smoke test: tiny random batch through the full model.
    model = make_model(100,200)
    # print(model)
    
    # NOTE(review): `input` shadows the builtin; kept as-is.
    # (10, 10) = (batch, seq) of random token ids; all masks are None.
    input = torch.randint(0,100,(10,10))
    output = torch.randint(0,200,(10,10))
    res=model(input,None,output,None,None)
    print(res.shape)
    # print(torch.sum(res,dim=-1))
    print(output)
    print(res)
    print('1','2')
    
    
    quit()
    # --- Everything below is DEAD CODE (unreachable after quit()). ---
    # It is a step-by-step component walkthrough kept for reference.
    # vocab size 100, model dimension 512
    dim = 512
    medim_dim = 64
    max_length = 100
    num_head = 8
    
    # embedding layer
    x_encoder = torch.randint(0,100, (5,10))
    print('最原始的输入',x_encoder)
    x_encoder_init = copy.deepcopy(x_encoder)
    
    em = Embeddings(max_length,dim)
    em_decode = copy.deepcopy(em)
    x_encoder = em(x_encoder)
    print(x_encoder.size()) # (5,10,512)
    
    # positional encoding
    position = PositionalEncoding(dim=dim)
    for name,content in position.named_parameters():
        print(name,content.size())
    x_encoder = position(x_encoder)
    print(x_encoder.size())
    
    x=x_encoder
    # attention
    # NOTE(review): an all-zeros mask blocks every position under
    # masked_fill(mask==0) — presumably only a shape smoke test; confirm.
    mask = torch.zeros(5,num_head,10,10)
    # mask = None
    
    # atten,_ = Attention(x,x,x,mask=mask)
    # print(atten)
    # print(atten.size())
    # print(_.float())
    
    # multi-head attention
    mulAttention = MultiHeaderAttention(dim=dim,num_head=num_head)
    y = mulAttention(x,x,x,mask=mask)
    print('多头注意力',y.size())
    # xw.view(pd.DataFrame(p_atten.detach().numpy()[0][0]))
    
    # position-wise feed-forward layer
    feed_forward = PositionWiseFeedForward(dim,medim_dim)
    y = feed_forward(y)
    print('feed_forward',y.size())
    
    # residual sublayer connection
    sublayer = Sublayer(dim=dim)
    y = sublayer(x_encoder,lambda x:mulAttention(x,x,x,mask=mask))
    print('子连接层',y.size())
    
    # encoder - single layer
    encoderLayer = EncoderLayer(mulAttention,feed_forward,dim=512)
    y = encoderLayer(x_encoder,mask)
    print('编码层-单层',y.size())
    
    # encoder - stacked layers
    encoder = Encoder(encoderLayer,n=6,dim=dim)
    y = encoder(x_encoder,mask=None)
    print('编码层-多层',y.size())
    
    # For this final check the decoder input reuses the encoder's input
    # data source — for testing purposes only.
    y_encoder_init = copy.deepcopy(x_encoder_init)
    y_encoder = em_decode(y_encoder_init)
    # decoder - single layer
    decoderlayer = DecoderLayer(mulAttention,feed_forward,dim=dim)
    output = decoderlayer(y_encoder,y,first_mask=mask,last_mask=mask)
    print('解码器-单层',output.size())
    
    # decoder - stacked layers
    decoder = Decoder(decoderlayer,n=4,dim=dim)
    decoder_output = decoder(y_encoder,y,mask,mask)
    print('解码器-多层',decoder_output.size())
    
    # output/generator layer
    genaration = Gennaration(vocab_size=2500,dim=512)
    y = genaration(decoder_output)
    print('输出层',y.size())
    
    
    
    # combine encoder and decoder
    encoder_decoder = EncoderDecoder(encoder,decoder,em,em_decode,genaration)
    # x_input,x_mask,y_input,first_mask,last_mask
    
    mask_decode_y = copy.deepcopy(mask)
    # keep only earlier positions; mask out the later content
    # NOTE(review): mask is all zeros here, so triu() is also zeros and
    # 1 - triu(...) is all ones — this likely does not build the intended
    # causal mask; verify.
    mask_decode_y = 1-torch.triu(mask_decode_y,diagonal=1)
    r = encoder_decoder(x_encoder_init,mask,y_encoder_init,mask_decode_y,None)
    print('编码器和解码器合并',r.size())