import torch
import torch.nn as nn
from torch.nn import Dropout
import torch.nn.functional as F
import math
from a_1_embedding import Embedding
import copy
import time

Device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
print(Device)


from a_3_attention_self import MultiAttention
from a_4_laynormal import Laynormal
from a_5_feedforward import FeedForward
from a_2_position_embeding import TransformEmbedding

class EncoderLayer(nn.Module):
    """One Transformer encoder layer (post-norm residual style):
    self-attention + dropout + residual + LayerNorm, then
    feed-forward + dropout + residual + LayerNorm.
    """

    def __init__(self, d_model, middle_dim, n_head, drop=0.1):
        """
        Args:
            d_model: model (embedding) dimension.
            middle_dim: hidden size of the position-wise feed-forward net.
            n_head: number of attention heads.
            drop: dropout probability used in both sub-layers.
        """
        super(EncoderLayer, self).__init__()
        self.attention = MultiAttention(d_model, n_head, drop)
        self.feed_forward = FeedForward(d_model, middle_dim, drop)
        # One norm and one dropout per sub-layer. Fresh construction replaces
        # the previous copy.deepcopy (identical initial state for LayerNorm /
        # Dropout, clearer intent) and fixes the 'laynorma_2' attribute typo.
        self.laynormal_1 = Laynormal(d_model)
        self.laynormal_2 = Laynormal(d_model)
        self.dropout_1 = Dropout(drop)
        self.dropout_2 = Dropout(drop)

    def forward(self, x, mask):
        """Apply both residual sub-layers to x (moved onto the module Device).

        Args:
            x: input hidden states.
            mask: attention mask forwarded to MultiAttention.
        Returns:
            Tensor of the same shape as x.
        """
        x = x.to(Device)
        # Post-norm residual: LayerNorm(x + Dropout(SubLayer(x)))
        x = self.laynormal_1(x + self.dropout_1(self.attention(x, x, x, mask)))
        x = self.laynormal_2(x + self.dropout_2(self.feed_forward(x)))
        return x


class Encoder(nn.Module):
    """Transformer encoder: token/position embedding followed by a stack of
    n_layer identical EncoderLayer blocks."""

    def __init__(self, vocab_size, d_model, middle_dim, n_head, n_layer, drop=0.1):
        super(Encoder, self).__init__()
        self.embedding = TransformEmbedding(vocab_size, d_model, drop)
        stack = [
            EncoderLayer(d_model, middle_dim, n_head, drop)
            for _ in range(n_layer)
        ]
        self.layers = nn.ModuleList(stack)

    def forward(self, x, mask):
        """Embed the token ids in x and pass them through every encoder layer."""
        hidden = self.embedding(x.to(Device))
        for block in self.layers:
            hidden = block(hidden, mask)
        return hidden



class DecoderLayer(nn.Module):
    """One Transformer decoder layer (post-norm residual style):
    masked self-attention, encoder-decoder cross-attention, and a
    feed-forward net — each wrapped in dropout + residual + LayerNorm.
    """

    def __init__(self, d_model, middle_dim, n_head, drop=0.1):
        """
        Args:
            d_model: model (embedding) dimension.
            middle_dim: hidden size of the position-wise feed-forward net.
            n_head: number of attention heads.
            drop: dropout probability used in all three sub-layers.
        """
        super(DecoderLayer, self).__init__()
        # BUG FIX: the original reused a single MultiAttention instance for
        # both the self-attention and the cross-attention sub-layers, forcing
        # them to share projection weights. A standard Transformer decoder
        # uses independent attention modules for the two sub-layers.
        self.attention = MultiAttention(d_model, n_head, drop)        # masked self-attention
        self.cross_attention = MultiAttention(d_model, n_head, drop)  # enc-dec attention
        self.feed_forward = FeedForward(d_model, middle_dim, drop)
        # One norm and one dropout per sub-layer; fresh instances replace the
        # previous copy.deepcopy calls (equivalent initial state, clearer).
        self.laynormal_1 = Laynormal(d_model)
        self.laynormal_2 = Laynormal(d_model)
        self.laynormal_3 = Laynormal(d_model)
        self.dropout_1 = Dropout(drop)
        self.dropout_2 = Dropout(drop)
        self.dropout_3 = Dropout(drop)

    def forward(self, dec, enc, mask_self, mask_cross):
        """Run one decoder layer.

        Args:
            dec: decoder-side hidden states.
            enc: encoder output used as key/value in cross-attention.
            mask_self: mask for the masked self-attention sub-layer.
            mask_cross: mask for the cross-attention sub-layer.
        Returns:
            Tensor of the same shape as dec.
        """
        dec = dec.to(Device)
        enc = enc.to(Device)
        # Masked self-attention over the decoder input.
        x = self.laynormal_1(dec + self.dropout_1(self.attention(dec, dec, dec, mask_self)))
        # Cross-attention: decoder queries attend over the encoder output.
        x = self.laynormal_2(x + self.dropout_2(self.cross_attention(x, enc, enc, mask_cross)))
        # Position-wise feed-forward.
        x = self.laynormal_3(x + self.dropout_3(self.feed_forward(x)))
        return x
        
        
class Decoder(nn.Module):
    """Transformer decoder: embedding, a stack of n_layer DecoderLayer blocks,
    and a final linear projection onto the target vocabulary."""

    def __init__(self, vocab_size, d_model, middle_dim, n_head, n_layer, drop=0.1):
        super(Decoder, self).__init__()
        self.embedding = TransformEmbedding(vocab_size, d_model, drop)
        blocks = [
            DecoderLayer(d_model, middle_dim, n_head, drop)
            for _ in range(n_layer)
        ]
        self.layers = nn.ModuleList(blocks)
        self.fc = nn.Linear(d_model, vocab_size, device=Device)

    def forward(self, dec, enc, mask_self, mask_cross):
        """Run target ids through the decoder stack and project to logits.

        (batch, seq) ids -> (batch, seq, vocab_size) logits,
        e.g. (5, 10, 512) @ (512, vocab) -> (5, 10, vocab).
        """
        hidden = self.embedding(dec.to(Device))
        for block in self.layers:
            hidden = block(hidden, enc, mask_self, mask_cross)
        return self.fc(hidden)

        


if __name__ == '__main__':
    t = time.time()

    # --- encoder smoke test ---
    x = torch.randint(0, 100, (5, 10))  # (batch=5, src_len=10) token ids
    # NOTE(review): a triangular (causal-style) mask on the encoder side is
    # unusual — encoders normally use a padding mask. Kept as-is for the test.
    mask = torch.triu(torch.ones(5, 10), diagonal=1) == 0
    mask = mask.view(5, 1, 1, 10).to(Device)  # broadcastable over heads/queries
    encoder = Encoder(100, 512, 128, 8, 6)
    enc = encoder(x, mask)

    # --- decoder smoke test ---
    mask_self = None
    mask_cross = None
    dec = torch.randint(0, 200, (5, 10))  # (batch=5, tgt_len=10) target ids
    decoder = Decoder(200, 512, 128, 8, 6)
    output = decoder(dec, enc, mask_self, mask_cross)
    print(output.size())  # (5, 10, 200) logits
    interval = time.time() - t
    print(f'{interval:.2f}')

    # --- loss ---
    log_softmax = F.log_softmax(output, dim=-1)
    loss = nn.NLLLoss()
    # NLLLoss expects (batch, classes, seq); BUG FIX: the target must be on
    # the same device as the logits, otherwise this crashes on CUDA.
    loss_value = loss(log_softmax.transpose(2, 1), dec.to(Device))
    print('lossValue', loss_value)
    # BUG FIX: .numpy() raises on a CUDA tensor — move to CPU first.
    print('type', type(loss_value.detach().cpu().numpy() + 0))
