# AttentionDecoder section
class AttentionDecoder(nn.Module):
    """Single-step decoder with additive (Bahdanau-style) attention.

    At each step it scores every encoder position against the previous
    decoder hidden state, pools the encoder outputs into a context
    vector, and feeds [token embedding ; context] into a one-layer LSTM.
    Relies on module-level hyperparameters defined elsewhere in the file:
    cn_vocab_size, embedding_size, hidden_size, MAX_LEN.
    """

    def __init__(self):
        super(AttentionDecoder, self).__init__()
        # Embedding table for target-language tokens.
        self.embedding = nn.Embedding(cn_vocab_size, embedding_size)
        # LSTM input = token embedding concatenated with attention context.
        self.lstm = nn.LSTM(embedding_size + hidden_size, hidden_size, batch_first=True)
        # Additive attention: score = v^T tanh(W [enc_out ; dec_hidden]).
        self.attention_linear1 = nn.Linear(hidden_size * 2, hidden_size)
        self.attention_linear2 = nn.Linear(hidden_size, 1)
        # Projects the LSTM hidden state to vocabulary logits.
        self.outlinear = nn.Linear(hidden_size, cn_vocab_size)

    # BUGFIX: `forward` was defined at module level (column 0) and was
    # therefore never a method of this class; it is now correctly indented.
    def forward(self, decoder_x, previous_hidden, previous_cell, encoder_outputs):
        """Run one decoding step.

        Args:
            decoder_x: (batch, 1) previous target token ids.
            previous_hidden: (batch, 1, hidden) previous decoder hidden state.
            previous_cell: (batch, 1, hidden) previous decoder cell state.
            encoder_outputs: (batch, MAX_LEN + 1, hidden) encoder states.

        Returns:
            output: (batch, cn_vocab_size) logits for the next token.
            (hidden, cell): updated states, each (batch, 1, hidden).
        """
        # Repeat the decoder state along the time axis so it can be
        # concatenated with every encoder position.
        # previous_hidden_repeat: (batch, MAX_LEN+1, hidden)
        previous_hidden_repeat = torch.tile(input=previous_hidden, dims=(1, MAX_LEN + 1, 1))

        # (batch, MAX_LEN+1, 2*hidden)
        attention_inputs = torch.cat((encoder_outputs, previous_hidden_repeat), dim=-1)

        attention_hidden = self.attention_linear1(attention_inputs)  # (batch, MAX_LEN+1, hidden)
        # BUGFIX: was torch.Tanh (the nn module class, AttributeError on torch);
        # the functional form is torch.tanh.
        attention_hidden = torch.tanh(attention_hidden)
        attention_logits = self.attention_linear2(attention_hidden)  # (batch, MAX_LEN+1, 1)
        # BUGFIX: squeeze only the trailing score dim; a bare squeeze() would
        # also collapse the batch dim when batch size is 1.
        attention_score = torch.squeeze(attention_logits, dim=-1)  # (batch, MAX_LEN+1)
        # Normalize over the source positions.
        attention_weights = nn.Softmax(dim=1)(attention_score)

        # BUGFIX: original line ended in a stray '.' (syntax error).
        # Broadcast the weights over the hidden dim so they can multiply
        # the encoder outputs elementwise.
        att_w = torch.unsqueeze(attention_weights, dim=-1)
        attention_weights = att_w.expand_as(encoder_outputs)  # (batch, MAX_LEN+1, hidden)

        # Weighted sum of encoder outputs -> context vector.
        context_vector = torch.multiply(encoder_outputs, attention_weights)
        context_vector = torch.sum(context_vector, dim=1)
        context_vector = torch.unsqueeze(context_vector, dim=1)  # (batch, 1, hidden)

        embedded = self.embedding(decoder_x)  # (batch, 1, embedding_size); renamed from `input` (shadowed builtin)
        lstm_input = torch.cat((embedded, context_vector), dim=-1)  # (batch, 1, embedding+hidden)

        # nn.LSTM expects states shaped (num_layers * directions, batch, hidden).
        previous_hidden = torch.permute(previous_hidden, (1, 0, 2))  # (1, batch, hidden)
        previous_cell = torch.permute(previous_cell, (1, 0, 2))
        x, (hidden, cell) = self.lstm(lstm_input, (previous_hidden, previous_cell))
        # Convert back from (num_layers, batch, hidden) to (batch, num_layers, hidden)
        # for the caller.
        hidden = torch.permute(hidden, (1, 0, 2))
        cell = torch.permute(cell, (1, 0, 2))

        output = self.outlinear(hidden)  # (batch, 1, cn_vocab_size)
        # BUGFIX: squeeze only the seq dim so a batch of size 1 keeps its
        # batch dimension.
        output = torch.squeeze(output, dim=1)  # (batch, cn_vocab_size)
        return output, (hidden, cell)
