import torch.nn as nn
from get_dict import get_data
import torch
from dataset import MyPairDataset
from torch.utils.data import DataLoader
from encoder import MyEncoderGRU
from until import MAX_LENGTH, SOS_token

# Call the data-loading helper at import time (module-level side effect).
# english_* / french_*: word<->index lookup tables plus vocabulary sizes;
# my_pairs: the parallel (english, french) sentence pairs used by the demo below.
english_word2index, english_index2word, english_word_n, \
    french_word2index, french_index2word, french_word_n, my_pairs = get_data()


# Decoder GRU equipped with an attention mechanism over the encoder outputs.
class AttentionDecoderGRU(nn.Module):
    """GRU decoder that attends over padded encoder outputs.

    Expected inputs (as driven by the demo script — confirm against callers):
        Q: current target token ids, shape (1, 1)
        K: previous decoder hidden state, shape (1, 1, hidden_size)
        V: encoder outputs padded to (max_len, hidden_size)
    Returns (log_probs, new_hidden, attention_weights).
    """

    def __init__(self, vocab_size, hidden_size, dropout_p=0.1, max_len=MAX_LENGTH):
        super(AttentionDecoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.dropout_p = dropout_p
        self.max_len = max_len

        # Token embedding: vocabulary index -> hidden_size vector.
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)
        self.dropout = nn.Dropout(p=dropout_p)

        # Linear layer applied to [Q ; K]: one attention score per encoder position.
        self.attention = nn.Linear(hidden_size * 2, max_len)

        # Linear layer applied to [Q ; context]: fuse back down to hidden_size.
        self.attention_combine = nn.Linear(hidden_size * 2, hidden_size)

        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size,
                          num_layers=1, batch_first=True)
        self.linear = nn.Linear(in_features=hidden_size, out_features=vocab_size)
        self.softmax = nn.LogSoftmax(dim=-1)

    def forward(self, Q, K, V):
        # Embed the current target token and apply dropout regularization.
        embedded = self.dropout(self.embedding(Q))

        # Attention scores from the concatenation [embedded token ; previous hidden].
        scores = self.attention(torch.cat((embedded[-1], K[-1]), dim=-1))
        attention_weights = torch.softmax(scores, dim=-1)

        # Weighted sum of encoder outputs:
        # (1, 1, max_len) bmm (1, max_len, hidden) -> (1, 1, hidden)
        context = torch.bmm(attention_weights.unsqueeze(0), V.unsqueeze(0))

        # Fuse the token embedding with the attention context, then run the GRU.
        fused = torch.cat((embedded[0], context[0]), dim=-1)
        gru_input = torch.relu(self.attention_combine(fused).unsqueeze(0))
        output, hidden = self.gru(gru_input, K)

        # Log-probabilities over the target vocabulary for this decoding step.
        log_probs = self.softmax(self.linear(output[-1]))
        return log_probs, hidden, attention_weights


if __name__ == '__main__':
    # Wrap the raw sentence pairs in a Dataset and draw one pair at a time.
    pair_dataset = MyPairDataset(my_pairs)
    pair_loader = DataLoader(dataset=pair_dataset, batch_size=1, shuffle=True)

    for x, y in pair_loader:
        print('目标y的形状:', y.shape)

        # Encode the source sentence; the encoder's final hidden state
        # becomes the decoder's initial hidden state.
        encoder = MyEncoderGRU(vocab_size=english_word_n, hidden_size=10)
        encoder_output, encoder_hidden = encoder(x, encoder.init_hidden())

        decoder = AttentionDecoderGRU(vocab_size=french_word_n, hidden_size=10,
                                      dropout_p=0.1, max_len=MAX_LENGTH)
        print('解码器模型结构:', decoder)

        # Prepare Q/K/V for step-by-step decoding.
        # Q: the start-of-sentence token, shape (1, 1).
        input_y = torch.tensor([[SOS_token]])
        # K: the encoder's final hidden state.
        decoder_hidden = encoder_hidden
        # V: encoder outputs padded (or truncated) to MAX_LENGTH rows.
        encoder_output_c = torch.zeros(MAX_LENGTH, encoder.hidden_size)
        copy_steps = min(encoder_output.shape[1], MAX_LENGTH)
        for i in range(copy_steps):
            encoder_output_c[i] = encoder_output[0, i]

        # Decode one token per target position, teacher-forcing the
        # ground-truth token as the next step's input.
        for i in range(y.shape[1]):
            print(f"========解码第{i + 1}次========")
            output, decoder_hidden, attention_weights = decoder(
                input_y, decoder_hidden, encoder_output_c)
            print(f'解码器输出的形状：-->{output.shape}')
            print(f'解码器输出的结果：-->{output}')
            print(f'解码器decoder_hidden的形状：-->{decoder_hidden.shape}')
            print(f'解码器decoder_hidden的结果：-->{decoder_hidden}')

            input_y = y[0][i].view(1, -1)
        break
