import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable
from one_embedding import Embeddings
from two_position import PositionEmbeddings
import copy
from four_linear import normalization
from three_attention import MultiAttention

'''
Residual connection rationale:

Given a sublayer input g(x), the block computes

    f(x) = g(x) + p[g(x)]

so the derivative with respect to g(x) is

    df/dg = 1 + p'[g(x)]

The constant 1 (identity path) keeps gradients flowing through deep
stacks even when p' is small.
'''




class Sublayer(nn.Module):
    """Pre-norm residual wrapper.

    Applies layer normalization to the input, feeds it through an
    arbitrary sublayer (e.g. attention or feed-forward), regularizes the
    result with dropout, and adds the original input back as a residual:

        output = x + dropout(sublayer(norm(x)))
    """

    def __init__(self, dim, dp=0.1):
        """dim: feature dimension to normalize over; dp: dropout prob."""
        super(Sublayer, self).__init__()
        self.norm = normalization(dim)
        self.dropout = nn.Dropout(dp)

    def forward(self, x, sublayer):
        # Normalize first (pre-norm), run the wrapped sublayer, apply
        # dropout, then add the identity (residual) path.
        normed = self.norm(x)
        transformed = sublayer(normed)
        return x + self.dropout(transformed)


if __name__ == '__main__':
    # Smoke test for Sublayer: wrap multi-head self-attention in a
    # residual connection and check that the output shape matches input.
    x = torch.randn(4, 10, 512)      # (batch, seq_len, d_model)
    m = MultiAttention(8, 512)       # 8 heads, model dim 512
    mask = torch.ones(10, 10)        # all-ones mask: nothing masked out
    # Self-attention: query, key, and value are the same tensor.
    # Use a distinct lambda parameter name so it does not shadow x.
    multiattention = lambda t: m(t, t, t, mask=mask)
    sublay = Sublayer(512)
    res = sublay(x, multiattention)
    print(res.size())                # expected: torch.Size([4, 10, 512])