# coding=utf-8
import torch
from torch import nn, optim


# LSTM model (ensemble of per-grid-cell sub-models)
class LSTM(torch.nn.Module):
    """Ensemble of independent LSTM regressors, one per grid cell.

    Each of the ``lstm_num`` sub-models consumes its own input sequence and
    emits one scalar prediction per batch element; the per-model outputs are
    concatenated into a single flat tensor.
    """

    def __init__(self, hidden_size, num_layers, lstm_num, time_chunk_size):
        """
        Args:
            hidden_size: hidden state size of every LSTM.
            num_layers: number of stacked layers in every LSTM.
            lstm_num: number of independent (LSTM, Linear) pairs.
            time_chunk_size: per-time-step feature size (LSTM ``input_size``).
        """
        super(LSTM, self).__init__()
        self.lstm_num = lstm_num
        self.lstm = nn.ModuleList([torch.nn.LSTM(input_size=time_chunk_size, hidden_size=hidden_size,
                                                 num_layers=num_layers) for i in range(self.lstm_num)])
        self.reg = nn.ModuleList([nn.Linear(hidden_size, 1) for i in range(self.lstm_num)])

    def forward(self, x):
        """Run every sub-model on its own slice of ``x`` and concatenate.

        Args:
            x: indexable collection of ``lstm_num`` tensors, each of shape
               (seq_len, batch, time_chunk_size).  NOTE(review): the
               ``.view(b)`` below only succeeds when seq_len == 1 —
               presumably the data pipeline guarantees that; confirm with
               the caller.

        Returns:
            1-D tensor of length ``lstm_num * batch``.
        """
        outputs = []
        for i in range(self.lstm_num):
            out, _ = self.lstm[i](x[i])
            s, b, h = out.shape
            # Flatten (seq, batch) so the linear layer sees one row per element.
            out = self.reg[i](out.view(s * b, h))
            outputs.append(out.view(b))
        if not outputs:
            # Preserve the original empty-result behavior for lstm_num == 0.
            return torch.empty(0)
        # Single cat instead of cat-in-a-loop: avoids quadratic copying, and
        # the result lives on the inputs' device (the old torch.empty(0).cuda()
        # seed broke CPU inputs on CUDA-capable machines).
        return torch.cat(outputs, dim=0)

    @staticmethod
    def train_and_predict(x_train, y_train, x_test, y_test, grid_num, time_chunk_size,
                          hidden_size, num_layers, learning_rate, num_epochs):
        """Train a fresh LSTM ensemble and return its train/test predictions.

        Args:
            x_train, x_test: numpy arrays indexable as ``grid_num`` sequences
                of shape (seq_len, batch, time_chunk_size), float32.
            y_train, y_test: numpy targets of shape (grid_num * batch,).
            grid_num: number of sub-models (forwarded as ``lstm_num``).
            time_chunk_size, hidden_size, num_layers: model hyper-parameters.
            learning_rate: Adam learning rate.
            num_epochs: number of full-batch training iterations (must be >= 1).

        Returns:
            (train predictions, test predictions, per-epoch [train, test] losses)
            — predictions as numpy arrays, losses as a list of 2-element lists.
        """
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)

        model = LSTM(hidden_size, num_layers, grid_num, time_chunk_size)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_test = x_test.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            # model(...) rather than model.forward(...) so module hooks run.
            lstm_train_out = model(x_train)

            loss = criterion(lstm_train_out, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Track test loss without building an autograd graph (the old
            # version kept gradient state alive for an unused forward pass).
            with torch.no_grad():
                loss_test = criterion(model(x_test), y_test)
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()

        with torch.no_grad():
            lstm_test_out = model(x_test).cpu().numpy()
        # NOTE(review): train predictions come from the last epoch's forward
        # pass, i.e. before the final optimizer.step() — kept as-is to
        # preserve the original behavior.
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes


# LSTM model for a single grid cell
class LSTM_separate(torch.nn.Module):
    """Single LSTM regressor for one grid cell.

    Unlike the ensemble variant, this wraps exactly one LSTM + Linear head
    and additionally exposes the pre-regression features from ``forward``.
    """

    def __init__(self, hidden_size, num_layers, lstm_num, time_chunk_size):
        """
        Args:
            hidden_size: hidden state size of the LSTM.
            num_layers: number of stacked LSTM layers.
            lstm_num: stored for interface parity with ``LSTM``; not used to
                build layers (a single LSTM/Linear pair is created).
            time_chunk_size: per-time-step feature size (LSTM ``input_size``).
        """
        super(LSTM_separate, self).__init__()
        self.lstm_num = lstm_num
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=hidden_size, num_layers=num_layers)
        self.reg = nn.Linear(hidden_size, 1)

    def forward(self, x):
        """Predict one scalar per batch element and return hidden features.

        Args:
            x: tensor of shape (seq_len, batch, time_chunk_size).
               NOTE(review): the ``.view(b)`` below only succeeds when
               seq_len == 1 — presumably guaranteed upstream; confirm.

        Returns:
            Tuple of (predictions of shape (batch,),
                      LSTM features of shape (seq_len * batch, hidden_size)).
        """
        out, _ = self.lstm(x)
        s, b, h = out.shape
        # Flatten (seq, batch) so the linear layer sees one row per element.
        to_fc = out.view(s * b, h)
        out = self.reg(to_fc)
        out = out.view(b)
        return out, to_fc

    @staticmethod
    def train_and_predict(x_train, y_train, x_test, y_test, grid_num, time_chunk_size,
                          hidden_size, num_layers, learning_rate, num_epochs):
        """Train a fresh single-cell LSTM and return predictions + features.

        Args:
            x_train, x_test: numpy arrays of shape
                (seq_len, batch, time_chunk_size), float32.
            y_train, y_test: numpy targets of shape (batch,).
            grid_num: forwarded as ``lstm_num`` (unused by the layers).
            time_chunk_size, hidden_size, num_layers: model hyper-parameters.
            learning_rate: Adam learning rate.
            num_epochs: number of full-batch training iterations (must be >= 1).

        Returns:
            (train predictions, train features, test predictions,
             test features, per-epoch [train, test] losses) — tensors
            converted to numpy arrays.
        """
        x_train = torch.from_numpy(x_train)
        y_train = torch.from_numpy(y_train)
        x_test = torch.from_numpy(x_test)
        y_test = torch.from_numpy(y_test)

        model = LSTM_separate(hidden_size, num_layers, grid_num, time_chunk_size)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train = x_train.cuda()
            y_train = y_train.cuda()
            x_test = x_test.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        to_fc_train = None
        for epoch in range(num_epochs):
            # model(...) rather than model.forward(...) so module hooks run.
            lstm_train_out, to_fc_train = model(x_train)

            loss = criterion(lstm_train_out, y_train)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if (epoch+1) % 50 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            # Track test loss without building an autograd graph (the old
            # version kept gradient state alive for an unused forward pass).
            with torch.no_grad():
                lstm_test_out, _ = model(x_test)
                loss_test = criterion(lstm_test_out, y_test)
            loss_changes.append([loss.item(), loss_test.item()])

        model.eval()

        with torch.no_grad():
            lstm_test_out, to_fc_test = model(x_test)
        lstm_test_out = lstm_test_out.cpu().numpy()
        to_fc_test = to_fc_test.cpu().numpy()
        # NOTE(review): train outputs/features come from the last epoch's
        # forward pass, i.e. before the final optimizer.step() — kept as-is
        # to preserve the original behavior.
        lstm_train_out = lstm_train_out.detach().cpu().numpy()
        to_fc_train = to_fc_train.detach().cpu().numpy()

        return lstm_train_out, to_fc_train, lstm_test_out, to_fc_test, loss_changes
