import torch
import torch.nn as nn
import torch.nn.functional as F


class RNNModel(nn.Module):
    """Recurrent neural network language model.

    Wraps a GRU or LSTM followed by a fully connected layer that projects
    hidden states back onto the vocabulary (``input_size`` classes).

    Args:
        input_size: vocabulary size; token indices are one-hot encoded
            to this width and logits are produced over it.
        hidden_size: number of hidden units per RNN layer.
        num_layers: number of stacked RNN layers.
        bidirectional: if True, run the RNN in both directions (the output
            width doubles and the projection layer is sized accordingly).
        is_gru: if True use ``nn.GRU``, otherwise ``nn.LSTM``.
    """

    def __init__(self, input_size, hidden_size, num_layers=1, bidirectional=False, is_gru=True):
        super(RNNModel, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional
        self.is_gru = is_gru
        # GRU and LSTM share the same constructor signature; pick the class once.
        rnn_cls = nn.GRU if is_gru else nn.LSTM
        self.rnn = rnn_cls(input_size=self.input_size,
                           hidden_size=self.hidden_size,
                           num_layers=self.num_layers,
                           bidirectional=self.bidirectional)
        # A bidirectional RNN concatenates forward and backward hidden states,
        # so the projection layer's input width doubles.
        self.num_directions = 2 if self.bidirectional else 1
        self.linear = nn.Linear(self.hidden_size * self.num_directions, self.input_size)
        # Build the loss criterion once here instead of on every loss() call.
        self.criterion = nn.CrossEntropyLoss()

    def forward(self, inputs, state):
        """Run one forward pass.

        Args:
            inputs: integer token indices of shape (batch, num_steps).
            state: initial hidden state as returned by ``init_state``.

        Returns:
            A tuple ``(output, state)`` where ``output`` has shape
            (num_steps * batch, input_size) and ``state`` is the updated
            hidden state.
        """
        # Transpose to (num_steps, batch) — the RNN is sequence-first —
        # then one-hot encode to (num_steps, batch, input_size).
        X = F.one_hot(inputs.T.long(), self.input_size).to(torch.float32)
        Y, state = self.rnn(X, state)
        # Flatten time and batch so the linear layer maps every step's
        # hidden state to vocabulary logits: (num_steps * batch, input_size).
        output = self.linear(Y.reshape((-1, Y.shape[-1])))
        return output, state

    def init_state(self, batch_size, device):
        """Return a zeroed initial hidden state for a batch.

        ``nn.GRU`` takes a single tensor as its hidden state, while
        ``nn.LSTM`` takes an ``(h, c)`` tuple; both tensors have shape
        (num_directions * num_layers, batch_size, hidden_size).
        """
        shape = (self.num_directions * self.num_layers, batch_size, self.hidden_size)
        if self.is_gru:
            return torch.zeros(shape, device=device)
        return (torch.zeros(shape, device=device),
                torch.zeros(shape, device=device))

    def loss(self, y_hat, y):
        """Cross-entropy between predicted logits ``y_hat`` and target indices ``y``."""
        return self.criterion(y_hat, y)