import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable
from one_embedding import Embeddings
from two_position import PositionEmbeddings
import copy
from four_linear import normalization,Linear
from three_attention import MultiAttention
from five_sublay import Sublayer


def cloneLay(model, n=2):
    """Return an nn.ModuleList of n independent deep copies of model.

    Each clone starts with identical parameters but is a separate module,
    so training updates one clone without affecting the others.
    """
    copies = (copy.deepcopy(model) for _ in range(n))
    return nn.ModuleList(copies)

class Encode(nn.Module):
    """One Transformer encoder layer.

    A self-attention sublayer followed by a position-wise feed-forward
    sublayer, each wrapped by a ``Sublayer`` (residual + norm wrapper,
    defined in five_sublay — TODO confirm exact wrapper semantics).
    """

    def __init__(self, l, m, dim=512, num_heads=8, dp=0.1, middle_dim=64):
        """
        Args:
            l: feed-forward module, called as ``l(x)``.
            m: multi-head attention module, called as ``m(q, k, v, mask=mask)``.
            dim: model dimension passed to the sublayer wrappers.
            dp: dropout probability passed to the sublayer wrappers.
            num_heads, middle_dim: stored for bookkeeping only; not used here.
        """
        super(Encode, self).__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.dp = dp
        self.middle_dim = middle_dim

        # Build the two sublayer wrappers directly. The original kept the
        # template as ``self.sublay`` and then deep-copied it, which left an
        # extra, never-used copy of the sublayer parameters registered on
        # this module (it showed up in named_parameters()).
        self.sublays = cloneLay(Sublayer(dim, dp), n=2)
        self.l = l
        self.m = m

    def forward(self, x, mask=None):
        """Apply self-attention then feed-forward, each with residual wrap.

        Args:
            x: input activations; presumably (batch, seq, dim) — TODO confirm.
            mask: optional attention mask forwarded to the attention module.
        """
        # Sublayer 0: self-attention with q = k = v = x.
        x = self.sublays[0](x, lambda x1: self.m(x1, x1, x1, mask=mask))
        # Sublayer 1: position-wise feed-forward.
        return self.sublays[1](x, self.l)


class Encoder(nn.Module):
    """A stack of n encoder layers followed by a final normalization."""

    def __init__(self, layer, n=6, dim=512):
        """
        Args:
            layer: encoder-layer template; deep-copied ``n`` times.
            n: number of stacked layers.
            dim: model dimension for the final normalization. Previously
                hard-coded to 512; parameterized with the same default so
                existing callers are unaffected.
        """
        super().__init__()
        self.layers = cloneLay(layer, n)
        self.n = normalization(dim)

    def forward(self, x, mask=None):
        """Run x through every layer and apply the final normalization.

        The original signature dropped the attention mask entirely, so
        masking could never reach the layers; ``mask`` is now propagated,
        and the ``None`` default preserves the old behavior.
        """
        for layer in self.layers:
            x = layer(x, mask=mask)
        return self.n(x)


if __name__ == '__main__':
    # Smoke test: push one random batch through a 6-layer encoder stack
    # and list every registered parameter along the way.
    x = torch.randn(4, 10, 512)

    dim = 512
    num_heads = 8
    middle_dim = 64
    mask = None

    attention = MultiAttention(num_heads, dim)
    feed_forward = Linear(dim, middle_dim)
    layer = Encode(feed_forward, attention)
    encoder = Encoder(layer, 6)

    # Dump parameter names/shapes so duplicated or missing modules are visible.
    for name, t in encoder.named_parameters():
        print(name, t.size())

    y = encoder(x)
    print(y.size())