import torch
import torch.nn as nn
from dataset import MyPairDataset
from torch.utils.data import DataLoader
from get_dict import get_data

# Load preprocessed data: English/French vocab mappings (word<->index),
# vocab sizes, and the list of (english, french) sentence pairs.
english_word2index, english_index2word, english_word_n, \
    french_word2index, french_index2word, french_word_n, my_pairs = get_data()


# GRU编码器
# GRU encoder
class MyEncoderGRU(nn.Module):
    """GRU-based encoder for a seq2seq model.

    Maps a batch of token-index sequences to per-step hidden representations
    via an embedding layer followed by a single-layer GRU.
    """

    def __init__(self, vocab_size: int, hidden_size: int):
        """
        :param vocab_size: size of the source vocabulary (number of embeddings)
        :param hidden_size: dimensionality of both the embeddings and the GRU hidden state
        """
        super(MyEncoderGRU, self).__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

        # Embedding layer: token indices -> dense vectors of size hidden_size
        self.embedding = nn.Embedding(num_embeddings=vocab_size, embedding_dim=hidden_size)

        # Single-layer GRU; batch_first=True means inputs are [batch, seq, feature]
        self.gru = nn.GRU(input_size=hidden_size, hidden_size=hidden_size, num_layers=1, batch_first=True)

    def forward(self, x, hidden):
        """Encode a batch of token-index sequences.

        :param x: LongTensor of token indices, shape [batch, seq_len]
        :param hidden: initial hidden state, shape [1, batch, hidden_size]
        :return: (output, hidden) — output has shape [batch, seq_len, hidden_size],
                 hidden has shape [1, batch, hidden_size]
        """
        # Embedding: [batch, seq_len] -> [batch, seq_len, hidden_size]
        embed = self.embedding(x)

        # GRU over the embedded sequence
        output, hidden = self.gru(embed, hidden)

        return output, hidden

    def init_hidden(self, batch_size: int = 1):
        """Return a zero initial hidden state.

        :param batch_size: batch size of the input (default 1, matching the
            original hard-coded behavior)
        :return: zeros of shape [num_layers=1, batch_size, hidden_size]
        """
        return torch.zeros(1, batch_size, self.hidden_size)


if __name__ == '__main__':
    # Build the dataset from the (english, french) sentence pairs.
    my_dataset = MyPairDataset(my_pairs)

    # DataLoader with batch_size=1; the encoder's init_hidden assumes batch 1.
    dataloader = DataLoader(dataset=my_dataset, batch_size=1, shuffle=True)

    for x, y in dataloader:
        print('特征x和目标y:', x, y)
        # Instantiate the encoder sized to the English vocabulary.
        # NOTE(review): constructed inside the loop — harmless here because of
        # the `break` below, but move it out if iterating more than once.
        my_encoder_gru = MyEncoderGRU(vocab_size=english_word_n, hidden_size=256)
        print('编码器模型结构:', my_encoder_gru)

        # Zero-initialized hidden state of shape [1, 1, hidden_size].
        hidden = my_encoder_gru.init_hidden()
        # Run one batch through the encoder.
        output, hidden = my_encoder_gru(x, hidden)
        print('编码器输出的形状:', output.shape)
        print('编码器输出的结果:', output)
        print('编码器hidden的形状:', hidden.shape)
        print('编码器hidden的结果:', hidden)

        print('=============x的形状变换=============')
        print('经过dataset', my_dataset[0][0].shape)
        print('经过dataloader', x.shape)
        print('经过编码器模型', output.shape)
        break
