import torch
import torch.nn as nn
import math
from torch.autograd import Variable
import torch.nn.functional as F
import matplotlib.pyplot as plt
import numpy as np
import copy

def clones(module, N):
    """Return an nn.ModuleList holding N independent deep copies of *module*."""
    copies = []
    for _ in range(N):
        copies.append(copy.deepcopy(module))
    return nn.ModuleList(copies)

class Embeddings(nn.Module):
    """Token-embedding lookup scaled by sqrt(d_model), per the Transformer paper."""

    def __init__(self, d_model, vocab):
        super(Embeddings, self).__init__()
        self.d_model = d_model
        self.vocab = vocab
        # lookup table: vocab rows, d_model columns
        self.lut = nn.Embedding(self.vocab, self.d_model)

    def forward(self, x):
        # x: integer token ids; output scaled so embeddings and positional
        # encodings have comparable magnitude.
        scale = math.sqrt(self.d_model)
        embedded = self.lut(x)
        return embedded * scale

class PositionalEncoding(nn.Module):
    """Add fixed sinusoidal position encodings to the input, then apply dropout.

    Bug fixed: the original stored the dropout *probability* as a plain float
    and never used it, so dropout was silently disabled. It is now a real
    nn.Dropout module applied to the summed output. The deprecated Variable
    wrapper is also removed — a registered buffer already has
    requires_grad=False.
    """

    def __init__(self, d_model, dropout=0.1, max_len=5000):
        super(PositionalEncoding, self).__init__()
        # BUGFIX: keep an nn.Dropout module so the probability is applied.
        self.dropout = nn.Dropout(p=dropout)

        # Precompute the (max_len, d_model) sinusoid table once.
        pe = torch.zeros(max_len, d_model)
        position = torch.arange(0, max_len).unsqueeze(1)
        # Geometric frequency progression: 10000^(-2i/d_model).
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(math.log(10000.0) / d_model))
        angles = position * div_term
        pe[:, 0::2] = torch.sin(angles)  # even channels
        pe[:, 1::2] = torch.cos(angles)  # odd channels

        pe = pe.unsqueeze(0)  # (1, max_len, d_model): broadcasts over batch
        # Buffer: moves with .to()/.cuda(), saved in state_dict, no gradient.
        self.register_buffer('pe', pe)

    def forward(self, x):
        # x: (batch, seq_len, d_model); slice the table to the input length.
        x = x + self.pe[:, :x.size(1)]
        return self.dropout(x)

class MultiHeadedAttention(nn.Module):
    """Multi-head scaled dot-product attention.

    Bug fixed: the original constructed ``self.dropout`` but never applied it
    (and ``forward`` took a dead ``dropout`` argument), so attention weights
    were never regularized. The module's Dropout is now passed to
    ``attention``. The ``dropout`` parameter of ``forward`` is kept only for
    backward compatibility and is ignored.
    """

    def __init__(self, head, embedding_dim, dropout=0.1):
        super(MultiHeadedAttention, self).__init__()
        # embedding_dim is assumed divisible by head for an even split.
        self.d_k = embedding_dim // head
        self.head = head
        # Four linears: Q, K, V projections plus the final output projection.
        self.linears = clones(nn.Linear(embedding_dim, embedding_dim), 4)
        self.attn = None  # last attention weights, kept for inspection
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, query, key, value, mask=None, dropout=0.1):
        # NOTE: `dropout` argument retained for compatibility but unused;
        # the Dropout module created in __init__ is applied instead.
        if mask is not None:
            # NOTE(review): unsqueeze(0) broadcasts the mask over the batch
            # dim; the canonical implementation uses unsqueeze(1) (head dim).
            # Kept as-is because this file's demos pass (head, L, L) masks.
            mask = mask.unsqueeze(0)
        batch_size = query.size(0)
        # Project, then reshape to (batch, head, seq, d_k).
        query, key, value = [
            lin(t).view(batch_size, -1, self.head, self.d_k).transpose(1, 2)
            for lin, t in zip(self.linears, (query, key, value))
        ]
        # BUGFIX: pass the Dropout module so attention weights are dropped out.
        x, self.attn = attention(query, key, value, mask=mask, dropout=self.dropout)
        # Merge heads back: (batch, seq, head * d_k).
        x = x.transpose(1, 2).contiguous().view(batch_size, -1, self.head * self.d_k)
        return self.linears[-1](x)


class PositionwiseFeedForward(nn.Module):
    """Position-wise two-layer MLP: Linear -> ReLU -> Dropout -> Linear."""

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w1 = nn.Linear(d_model, d_ff)   # expand to the inner width
        self.w2 = nn.Linear(d_ff, d_model)   # project back to model width
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x):
        hidden = F.relu(self.w1(x))
        return self.w2(self.dropout(hidden))


def attention(query, key, value, mask=None, dropout=None):
    """Scaled dot-product attention.

    Returns a tuple ``(output, weights)`` where ``weights`` is the softmax
    attention distribution over the keys.
    """
    d_k = query.size(-1)
    # Similarity scores, scaled by sqrt(d_k) to keep softmax gradients healthy.
    scores = torch.matmul(query, key.transpose(-2, -1)) / math.sqrt(d_k)
    if mask is not None:
        # Large negative fill so masked positions vanish after softmax.
        scores = scores.masked_fill(mask == 0, -1e9)
    weights = F.softmax(scores, dim=-1)
    if dropout is not None:
        weights = dropout(weights)
    output = torch.matmul(weights, value)
    return output, weights


class LayerNorm(nn.Module):
    """Layer normalization over the last dimension with a learnable affine."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.a2 = nn.Parameter(torch.ones(features))   # scale (gamma)
        self.b2 = nn.Parameter(torch.zeros(features))  # shift (beta)
        self.eps = eps  # keeps the division numerically safe

    def forward(self, x):
        # NOTE: divides by (std + eps), not sqrt(var + eps), and torch.std is
        # the unbiased sample std — the Annotated-Transformer convention.
        mu = x.mean(dim=-1, keepdim=True)
        sigma = x.std(dim=-1, keepdim=True)
        normalized = (x - mu) / (sigma + self.eps)
        return self.a2 * normalized + self.b2


class SublayerConnection(nn.Module):
    """Pre-norm residual wrapper: ``x + dropout(sublayer(norm(x)))``."""

    def __init__(self, size, dropout=0.1):
        super(SublayerConnection, self).__init__()
        self.norm = LayerNorm(features=size)
        self.dropout = nn.Dropout(p=dropout)

    def forward(self, x, sublayer):
        # `sublayer` is any callable taking and returning a tensor of x's shape.
        residual = self.dropout(sublayer(self.norm(x)))
        return x + residual

class EncoderLayer(nn.Module):
    """One encoder block: self-attention sublayer followed by feed-forward."""

    def __init__(self, size, self_attn, feed_forward, dropout):
        super(EncoderLayer, self).__init__()
        self.self_attn = self_attn
        self.feed_forward = feed_forward
        self.size = size  # model width; Encoder reads this for its final norm
        self.sublayer = clones(SublayerConnection(size, dropout), 2)

    def forward(self, x, mask):
        attend = lambda t: self.self_attn(t, t, t, mask)
        x = self.sublayer[0](x, attend)
        return self.sublayer[1](x, self.feed_forward)

class Encoder(nn.Module):
    """Stack of N identical encoder layers, followed by one final LayerNorm."""

    def __init__(self, layer, N):
        super(Encoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    def forward(self, x, mask):
        for enc_layer in self.layers:
            x = enc_layer(x, mask)
        return self.norm(x)


class DecoderLayer(nn.Module):
    """One decoder block: masked self-attention, cross-attention over the
    encoder memory, then the feed-forward sublayer.

    Bug fixed: the original ``forward`` computed each sublayer's result and
    discarded it (``x`` was never rebound), so the layer returned its input
    unchanged and the residual stack did nothing.
    """

    def __init__(self, size, self_attn, src_attn, feed_forward, dropout):
        super(DecoderLayer, self).__init__()
        self.size = size
        self.self_attn = self_attn    # masked self-attention over the target
        self.src_attn = src_attn      # attention over encoder memory
        self.feed_forward = feed_forward
        self.sublayer = clones(SublayerConnection(size, dropout), 3)

    # `sorce_mask` keeps the original (misspelled) keyword name so existing
    # keyword callers stay compatible.
    def forward(self, x, memory, sorce_mask, target_mask):
        m = memory
        # BUGFIX: each sublayer output must be rebound to x.
        x = self.sublayer[0](x, lambda x: self.self_attn(x, x, x, target_mask))
        x = self.sublayer[1](x, lambda x: self.src_attn(x, m, m, sorce_mask))
        x = self.sublayer[2](x, self.feed_forward)
        return x


class Decoder(nn.Module):
    """Stack of N identical decoder layers, followed by one final LayerNorm."""

    def __init__(self, layer, N):
        super(Decoder, self).__init__()
        self.layers = clones(layer, N)
        self.norm = LayerNorm(layer.size)

    # `sorce_mask` keeps the original (misspelled) keyword name for callers.
    def forward(self, x, memory, sorce_mask, target_mask):
        for dec_layer in self.layers:
            x = dec_layer(x, memory, sorce_mask, target_mask)
        return self.norm(x)


class Generator(nn.Module):
    """Final head: project d_model features to vocab-size log-probabilities."""

    def __init__(self, d_model, vocab_size):
        super(Generator, self).__init__()
        self.project = nn.Linear(d_model, vocab_size)

    def forward(self, x):
        logits = self.project(x)
        return F.log_softmax(logits, dim=-1)


class EncoderDecoder(nn.Module):
    """Transformer skeleton wiring: embed -> encode -> decode -> generate."""

    def __init__(self, encoder, decoder, source_embed, target_embed, generator):
        super(EncoderDecoder, self).__init__()
        self.encoder = encoder            # maps embedded source to memory
        self.decoder = decoder            # attends target over memory
        self.source_embed = source_embed  # source token embedding pipeline
        self.target_embed = target_embed  # target token embedding pipeline
        self.generator = generator        # projection to output vocabulary

    def forward(self, source, target, source_mask, target_mask):
        embedded_src = self.source_embed(source)
        memory = self.encoder(embedded_src, source_mask)
        embedded_tgt = self.target_embed(target)
        decoded = self.decoder(embedded_tgt, memory, source_mask, target_mask)
        return self.generator(decoded)


def make_model(source_vocab, target_vocab, N=6, d_model=512, d_ff=2048, head=8, dropout=0.1):
    """Assemble a complete Transformer encoder-decoder and Xavier-init it.

    N is the number of encoder/decoder layers, d_model the model width,
    d_ff the feed-forward inner width, head the attention head count.
    """
    c = copy.deepcopy
    attn = MultiHeadedAttention(head=head, embedding_dim=d_model, dropout=dropout)
    ff = PositionwiseFeedForward(d_model=d_model, d_ff=d_ff, dropout=dropout)
    position = PositionalEncoding(d_model=d_model, dropout=dropout)

    encoder = Encoder(EncoderLayer(d_model, c(attn), c(ff), dropout), N)
    decoder = Decoder(DecoderLayer(d_model, c(attn), c(attn), c(ff), dropout), N)
    src_embed = nn.Sequential(Embeddings(d_model, source_vocab), c(position))
    tgt_embed = nn.Sequential(Embeddings(d_model, target_vocab), c(position))
    model = EncoderDecoder(encoder, decoder, src_embed, tgt_embed,
                           Generator(d_model, target_vocab))

    # Xavier-initialize every weight matrix; 1-D params (biases) keep defaults.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
    return model


def dm12_test_make_model():
    """Demo: build the full model and push one toy batch through it."""
    source_vocab = 500
    target_vocab = 1000
    N = 6

    my_transform_modelobj = make_model(source_vocab, target_vocab, N=6,
                                       d_model=512, d_ff=2048, head=8,
                                       dropout=0.1)
    print(my_transform_modelobj)

    # Two sequences of four token ids; identical source and target.
    source = target = Variable(torch.LongTensor([[1, 2, 3, 8], [3, 4, 1, 8]]))
    # All-zero masks: every position is masked out in attention.
    source_mask = target_mask = Variable(torch.zeros(8, 4, 4))

    mydata = my_transform_modelobj(source, target, source_mask, target_mask)
    print('mydata.shape--->', mydata.shape)
    print('mydata--->', mydata)


def dm10_test_Decoder():
    """Demo: run a 6-layer decoder stack on random tensors."""
    pe_result = torch.randn(2, 4, 512)
    source_mask = Variable(torch.zeros(8, 4, 4))
    target_mask = Variable(torch.zeros(8, 4, 4))
    self_attn = src_attn = MultiHeadedAttention(8, 512, 0.1)
    ffn = PositionwiseFeedForward(512, 1024)
    layer = DecoderLayer(512, self_attn, src_attn, ffn, 0.1)
    memory = torch.randn(2, 4, 512)
    my_decoder = Decoder(layer, 6)
    print('my_decoder-->', my_decoder)
    dl_result = my_decoder(pe_result, memory, source_mask, target_mask)
    print('dl_result-->', dl_result.shape, dl_result)

def dm09_test_decoderLayer():
    """Demo: run a single DecoderLayer on random target/memory tensors."""
    pe_result = torch.randn(2, 4, 512)
    source_mask = Variable(torch.zeros(8, 4, 4))
    target_mask = Variable(torch.zeros(8, 4, 4))
    self_attn = src_attn = MultiHeadedAttention(8, 512, 0.1)
    ffn = PositionwiseFeedForward(512, 1024)
    my_decoderlayer = DecoderLayer(512, self_attn, src_attn, ffn, 0.1)
    print('my_decoderlayer-->', my_decoderlayer)
    memory = torch.randn(2, 4, 512)
    dl_result = my_decoderlayer(pe_result, memory, source_mask, target_mask)
    print('dl_result-->', dl_result.shape, dl_result)

def dm08_test_Encoder():
    """Demo: run a 6-layer encoder stack on random tensors."""
    c = copy.deepcopy
    pe_result = torch.randn(2, 4, 512)
    mask = Variable(torch.zeros(8, 4, 4))
    my_mha = MultiHeadedAttention(8, 512, 0.1)
    d_model, d_ff = 512, 1024
    ffn = PositionwiseFeedForward(d_model, d_ff)
    enc_layer = EncoderLayer(512, c(my_mha), c(ffn), 0.1)
    my_encoder = Encoder(enc_layer, 6)
    print('my_encoder-->', my_encoder)
    encoder_result = my_encoder(pe_result, mask)
    print('encoder_result-->', encoder_result.shape, encoder_result)

def dm07_test_EncoderLayer():
    """Demo: run a single EncoderLayer on random tensors."""
    pe_result = torch.randn(2, 4, 512)
    mask = Variable(torch.zeros(8, 4, 4))
    my_mha = MultiHeadedAttention(8, 512, 0.1)
    d_model, d_ff = 512, 1024
    ffn = PositionwiseFeedForward(d_model, d_ff)
    my_encoderlayer = EncoderLayer(512, my_mha, ffn, 0.1)
    print('my_encoderlayer', my_encoderlayer)
    x = my_encoderlayer(pe_result, mask)
    print('x-->', x.shape, x)

def dm02_test_MultiHeadedAttention():
    """Demo: multi-head attention over a random 'positional-encoded' tensor."""
    d_model = 512
    vocab = 1000

    pe_result = torch.randn(2, 4, 512)

    head = 8
    dropout = 0.1
    # Self-attention: same tensor plays query, key and value.
    query = key = value = pe_result

    mask = Variable(torch.zeros(8, 4, 4))

    mha_obj = MultiHeadedAttention(head=8, embedding_dim=512, dropout=0.1)
    print('mha_obj-->', mha_obj)
    x = mha_obj(query, key, value, mask)
    print('多头注意机制后的x', x.shape, '\n', x)
    print('多头注意力机制的注意力权重分布', mha_obj.attn.shape)

def dm04_test_PositionwiseFeedForward():
    """Demo: the position-wise feed-forward network on a random tensor."""
    pff = PositionwiseFeedForward(512, 1024)
    print('pff-->', pff)
    x = torch.randn(2, 4, 512)
    x = pff(x)
    print('x-->', x.shape, x)

def dm05_test_LayerNorm():
    """Demo: layer normalization on a random tensor."""
    mylayernorm = LayerNorm(512)
    print('mylayernorm-->', mylayernorm)

    pe_result = torch.randn(2, 4, 512)
    layernorm_result = mylayernorm(pe_result)
    print('layernorm-->', layernorm_result, layernorm_result.shape)

def dm06_test_SublayerConnection():
    """Demo: residual sublayer connection wrapping multi-head attention."""
    size = 512
    my_sub = SublayerConnection(size)
    print('my_sub-->', my_sub)
    x = torch.randn(2, 4, 512)
    mask = Variable(torch.zeros(8, 4, 4))
    my_mha = MultiHeadedAttention(8, 512, 0.1)
    sublayer = lambda x: my_mha(x, x, x, mask)
    x = my_sub(x, sublayer)
    print('x-->', x.shape, x)

def dm11_test_Generator():
    """Demo: the output generator head on a random tensor."""
    my_generator = Generator(512, 1000)
    x = torch.randn(2, 4, 512)
    generator_result = my_generator(x)
    print('generator-->', generator_result.shape, generator_result)

# Script entry point: run the end-to-end make_model smoke test.
if __name__ == '__main__':
    dm12_test_make_model()

