import numpy as np
import torch
import torch.nn as nn


class custom_model(nn.Module):
    """Many-to-many sequence model: an LSTM over the whole input sequence,
    followed by a per-step linear projection of the top-layer hidden states.
    """

    def __init__(self, input_size, hidden_size, num_layers, output_size):
        super(custom_model, self).__init__()
        self.num_layers = num_layers
        self.hidden_size = hidden_size
        self.output_size = output_size
        # batch_first only changes the input/output layout to
        # [batch, seq, feature]; hidden and cell states keep the
        # [num_layers, batch, hidden_size] layout regardless.
        self.lstm = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            batch_first=True,
        )
        self.predict = nn.Linear(hidden_size, output_size)

    def all_hidden_vectors(self, poem, device):
        """Run the LSTM over the full sequence.

        Returns the top-layer hidden state at every time step,
        shape [batch, seq_len, hidden_size].
        """
        batch = poem.size(0)
        # Explicit zero initial states (same as PyTorch's default when the
        # state argument is omitted).
        state_shape = (self.num_layers, batch, self.hidden_size)
        initial_state = (
            torch.zeros(state_shape).to(device),
            torch.zeros(state_shape).to(device),
        )
        # First value: per-step top-layer outputs; second (discarded): the
        # final (h, c) pair of every layer.
        outputs, _ = self.lstm(poem, initial_state)
        return outputs

    def forward(self, poem, device):
        """Project each per-step hidden vector to output_size values.

        Returns shape [batch, seq_len, output_size].
        """
        return self.predict(self.all_hidden_vectors(poem, device))


class one2many(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(one2many, self).__init__()
        # batch_first只影响input, output的维度表示, 中间的hidden, cell的维度还是[seq, batch, feature_size]
        self.lstm1 = nn.LSTMCell(input_size=input_size, hidden_size=hidden_size)
        self.lstm2 = nn.LSTMCell(input_size=hidden_size, hidden_size=hidden_size)
        self.prePredict = nn.Linear(hidden_size, hidden_size)
        self.predict = nn.Linear(hidden_size, output_size)
        self.changeYsize = nn.Linear(output_size, hidden_size)
        self.hidden_size, self.input_size, self.output_size = \
            hidden_size, input_size, output_size
        self.rl = nn.ReLU()

    def lstm1time(self, hatY, h1, h2, c1, c2):
        # poem: batch_size, wv_dim
        h, c = self.lstm1(hatY, (h1, c1))
        h, c = self.rl(h), self.rl(c)
        h_final, c_final = self.lstm2(h, (h2, c2))
        return h, c, h_final, c_final

    def giveout(self, h):
        out = self.prePredict(h)
        out = self.rl(out)
        out = self.predict(out)
        return out

    # forward考虑batch
    def forward(self, poem, device):
        # o = batch_size, seq_len, hidden_dim
        # output = batch_size, seq_len, output_size
        # prob = batch_size, seq_len, output_size
        bs, length = poem.size()[:2]
        h1 = torch.zeros([bs, self.hidden_size]).to(device)
        h2 = torch.zeros([bs, self.hidden_size]).to(device)
        c1 = torch.zeros([bs, self.hidden_size]).to(device)
        c2 = torch.zeros([bs, self.hidden_size]).to(device)

        h1, c1, h2, c2 = self.lstm1time(poem[:, 0, :], h1, h2, c1, c2)
        hatY = self.giveout(h2)
        zeroVec = torch.zeros([bs, self.input_size])
        out = torch.zeros([bs, length, self.output_size])
        for i in range(1, length):
            x = poem[:, i, :]
            # catVec = torch.concatenate((hatY, x), dim=1).to(device)
            # real_input = zeroVec
            real_input = self.changeYsize(hatY) + x
            h1, c1, h2, c2 = self.lstm1time(real_input, h1, h2, c1, c2)
            hatY = self.giveout(h2)
            # out.append(hatY.clone().detach().numpy())
            out[:, i, :] = hatY
        return out

    # 推断时initial不考虑batch
    def inferByInitial(self, initial, stopId, device):
        with torch.no_grad():
            h1 = torch.zeros([1, self.hidden_size]).to(device)
            h2 = torch.zeros([1, self.hidden_size]).to(device)
            c1 = torch.zeros([1, self.hidden_size]).to(device)
            c2 = torch.zeros([1, self.hidden_size]).to(device)

            h1, c1, h2, c2 = self.lstm1time(initial, h1, h2, c1, c2)
            hatY = self.giveout(h2)
            curId = np.argmax(hatY.detach().numpy())
            ct = 0
            # x = torch.zeros([1, self.input_size]).to(device)
            out = [curId]
            while curId not in stopId and ct <= 60:
                # catVec = torch.concatenate((hatY, x), dim=1)
                # real_input = x
                real_input = self.changeYsize(hatY)
                h1, c1, h2, c2 = self.lstm1time(real_input, h1, h2, c1, c2)
                hatY = self.giveout(h2)
                curId = np.argmax(hatY.detach().numpy())
                out.append(curId)
                ct += 1
            return out

