import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import numpy as np
from torch.autograd import Variable
from one_embedding import Embeddings
from two_position import PositionEmbeddings
import copy

from three_attention import MultiAttention

class Linear(nn.Module):
    """Position-wise feed-forward block: Linear -> ReLU -> Dropout -> Linear.

    Projects the last dimension from `dim` up to `middle_dim` and back down
    to `dim`, so the output shape matches the input shape.
    """

    def __init__(self, dim, middle_dim, dp=0.1):
        super().__init__()
        # Attribute names are kept as-is: they are state_dict keys.
        self.liner1 = nn.Linear(dim, middle_dim)  # expansion projection
        self.liner2 = nn.Linear(middle_dim, dim)  # contraction projection
        self.dropout = nn.Dropout(dp)

    def forward(self, x):
        # x: [..., dim] -> [..., dim]
        hidden = F.relu(self.liner1(x))
        hidden = self.dropout(hidden)
        return self.liner2(hidden)
    
    
    
class normalization(nn.Module):
    """Layer normalization over the last dimension with learnable affine params.

    Normalizes each feature vector to zero mean / unit std along dim=-1,
    then applies a learned elementwise scale (`ones`, i.e. gamma) and
    shift (`zeros`, i.e. beta).
    """

    def __init__(self, dim, eps=1e-9):
        super().__init__()
        self.ones = nn.Parameter(torch.ones(dim))    # gamma: per-feature scale
        self.zeros = nn.Parameter(torch.zeros(dim))  # beta: per-feature shift
        self.eps = eps  # guards against division by zero for constant inputs

    def forward(self, x):
        # x: [batch_size, seq_len, dim]
        mean = x.mean(dim=-1, keepdim=True)
        # NOTE: torch.std defaults to the unbiased (sample) estimator,
        # unlike nn.LayerNorm which uses the biased variance.
        std = x.std(dim=-1, keepdim=True)
        # BUG FIX: the original computed ones*(x-std)/(mean+eps)+zeros,
        # swapping mean and std. LayerNorm is (x - mean) / (std + eps).
        return self.ones * (x - mean) / (std + self.eps) + self.zeros



    
if __name__ == "__main__":
    # Smoke test: print the normalization layer's parameter names and shapes.
    
    nor = normalization(512)
    for name,content in nor.named_parameters():
        print(name,content.shape)
    
    
    # NOTE(review): quit() makes everything below unreachable -- presumably
    # leftover from debugging the pipeline incrementally; remove to run it.
    quit()
    
    # Build token embeddings (vocab size 100, embedding dim 512).
    embed = Embeddings(100,512)
    input = torch.randint(0,100,size=(5,10))
    output1 = embed(input)# (5,10,512)
    # Add positional encodings on top of the token embeddings.
    positionembed = PositionEmbeddings(dim=512)
    output = positionembed(output1)
    print(output.shape) # (5,10,512)
    
    q=k=v=output
    # mask=torch.ones(5,10,10)
    mask=torch.zeros(5,8,10,10) # (10,10), (5,8,10,10), (1,1,10,10), (1,10,10) etc. all work -- any shape broadcastable against (5,8,10,10) is fine
    # NOTE(review): triu of an all-zeros tensor is still all zeros, so this
    # mask is a no-op -- confirm whether a causal mask built from ones was intended.
    mask = torch.triu(mask,diagonal=0)
    
    # # Single-head self-attention -> (5,10,512)
    # res,_ = attention(q,k,v,mask=None)
    # print(_.int())
    
    # Multi-head attention (8 heads, dim 512) -> (5,10,512)
    multiattention = MultiAttention(8,512)
    
    for name ,v1 in multiattention.named_parameters():
        print(name,v1.shape)
    
    res = multiattention(q,k,v,mask=mask)
    print(res)
    print(res.size())
    
    # Position-wise feed-forward layer applied to the attention output.
    Liner = Linear(512,64)
    # x = torch.randn(5,10,512)
    res = Liner(res)
    print(res.shape)
    