from backbones.encoder_decoder import Encoder, Decoder, EncoderDecoder
from torch import nn
import torch


def get_device(device):
    """Resolve a device specification, defaulting to CUDA when available.

    Args:
        device: An explicit device spec (``torch.device``, string, or int
            index) or ``None`` to auto-select.

    Returns:
        The given ``device`` unchanged when one was provided, otherwise
        ``torch.device("cuda")`` if CUDA is available, else
        ``torch.device("cpu")``.
    """
    # Compare against None explicitly: device index 0 is a valid spec but
    # falsy, so a plain truthiness test would silently discard it.
    if device is not None:
        return device
    return torch.device("cuda" if torch.cuda.is_available() else "cpu")


class Seq2SeqEncoder(Encoder):
    """Encoder half of a sequence-to-sequence model.

    Embeds token ids and runs them through a GRU, returning only the final
    hidden state, which the decoder consumes as its initial state.
    """

    def __init__(self, vocab_size, embedding_dims, hidden_size, num_layers, device=None):
        """
        Args:
            vocab_size: Size of the source vocabulary.
            embedding_dims: Dimension of the token embeddings.
            hidden_size: Hidden-state size of the GRU.
            num_layers: Number of stacked GRU layers.
            device: Optional device spec; resolved via ``get_device``
                (auto-selects CUDA when available if ``None``).
        """
        super().__init__()
        self.device = get_device(device)
        # NOTE: the embedding table holds vocab_size * embedding_dims weights.
        # Pass the *resolved* device (self.device), not the raw argument —
        # otherwise a None argument leaves the layers on the default device
        # even when CUDA was selected.
        self.embedding = nn.Embedding(vocab_size, embedding_dims, device=self.device)
        self.gru = nn.GRU(embedding_dims, hidden_size, num_layers, batch_first=True,
                          device=self.device)

    def forward(self, x):
        """Encode a batch of token-id sequences.

        Args:
            x: Long tensor of token ids, shape (batch, seq_len).

        Returns:
            The GRU's final hidden state, shape (num_layers, batch, hidden_size).
        """
        x = self.embedding(x)
        # When the GRU receives only the input (no initial hidden state),
        # it internally uses an all-zero hidden state.
        _, h = self.gru(x)  # The encoder only needs to produce the hidden state.
        return h


class Seq2SeqDecoder(Decoder):
    """Decoder half of a sequence-to-sequence model.

    Embeds target token ids, runs them through a GRU seeded with the
    encoder's hidden state, and projects every timestep's output to
    vocabulary logits.
    """

    def __init__(self, vocab_size, embedding_dims, hidden_size, num_layers, device=None):
        """
        Args:
            vocab_size: Size of the target vocabulary.
            embedding_dims: Dimension of the token embeddings.
            hidden_size: Hidden-state size of the GRU.
            num_layers: Number of stacked GRU layers (must match the encoder
                so the hidden state can be handed over).
            device: Optional device spec; resolved via ``get_device``
                (auto-selects CUDA when available if ``None``).
        """
        super().__init__()
        self.device = get_device(device)
        self.hidden_size = hidden_size
        # NOTE: the embedding table holds vocab_size * embedding_dims weights.
        # Pass the *resolved* device (self.device), not the raw argument —
        # otherwise a None argument leaves the layers on the default device
        # even when CUDA was selected.
        self.embedding = nn.Embedding(vocab_size, embedding_dims, device=self.device)
        self.gru = nn.GRU(embedding_dims, hidden_size, num_layers, batch_first=True,
                          device=self.device)
        self.fc = nn.Linear(hidden_size, vocab_size, device=self.device)

    def forward(self, x, h):
        """Decode a batch of sequences given the encoder's hidden state.

        Args:
            x: Long tensor of target token ids, shape (batch, seq_len).
            h: Encoder hidden state, shape (num_layers, batch, hidden_size).

        Returns:
            Logits over the vocabulary, shape (batch * seq_len, vocab_size) —
            the time dimension is flattened before the output projection.
        """
        x = self.embedding(x)
        x, _ = self.gru(x, h)
        # Flatten (batch, seq_len, hidden) -> (batch*seq_len, hidden) so one
        # linear layer scores every timestep at once.
        x = x.reshape(-1, self.hidden_size)
        x = self.fc(x)
        return x