# coding=utf-8
import torch
from torch import nn, optim


# Combined model: one LSTM per grid block feeding a shared fully-connected head
class LSTM_FC(torch.nn.Module):
    """Combined model: one LSTM per grid block plus a fully-connected head.

    Each of the ``lstm_num`` LSTMs encodes the time series of one grid
    block; their outputs are stacked row-wise, concatenated with the
    static features ``x_fc``, and regressed to a single value per row
    by an MLP.
    """

    def __init__(self, hidden_size, num_layers, lstm_num, time_chunk_size, in_dim, fc_hidden_list):
        """
        :param hidden_size: hidden size of every LSTM.
        :param num_layers: number of stacked layers in every LSTM.
        :param lstm_num: number of independent LSTMs (one per grid block).
        :param time_chunk_size: LSTM input feature size per time step.
        :param in_dim: width of the extra feature vector ``x_fc``.
        :param fc_hidden_list: hidden widths of the fully-connected head.
        """
        super(LSTM_FC, self).__init__()
        self.lstm_num = lstm_num
        self.lstm = nn.ModuleList([torch.nn.LSTM(input_size=time_chunk_size, hidden_size=hidden_size,
                                                 num_layers=num_layers) for _ in range(self.lstm_num)])

        lstm_out_len = hidden_size
        # BUG FIX: the FC head used to live in a plain Python list, so its
        # parameters were invisible to .parameters()/.cuda() and were never
        # trained by the optimizer.  nn.ModuleList registers them properly.
        self.layers = nn.ModuleList()
        self.layers.append(nn.Sequential(nn.Linear(in_dim + lstm_out_len, fc_hidden_list[0]), nn.ReLU(True)))
        for i in range(len(fc_hidden_list) - 1):
            self.layers.append(nn.Sequential(nn.Linear(fc_hidden_list[i], fc_hidden_list[i + 1]), nn.ReLU(True)))
        # BUG FIX: the original passed nn.ReLU(True) as nn.Linear's third
        # positional argument (``bias``), which is merely truthy, so no
        # activation was ever applied to the output.  A plain linear output
        # layer reproduces that effective behavior and is the usual choice
        # for MSE regression anyway.
        self.layers.append(nn.Linear(fc_hidden_list[-1], 1))

    def forward(self, x_lstm, x_fc):
        """Run every per-grid LSTM, merge with ``x_fc``, apply the FC head.

        :param x_lstm: per-grid inputs; x_lstm[i] is fed to LSTM i and is
            presumably (seq, batch, time_chunk_size) -- TODO confirm layout
            with the caller.
        :param x_fc: (rows, in_dim) extra features, one row per flattened
            (grid, seq, batch) sample.
        :return: (rows, 1) predictions.
        """
        per_grid = []
        for i in range(self.lstm_num):
            out, _ = self.lstm[i](x_lstm[i])
            seq_len, batch, hidden = out.shape
            per_grid.append(out.view(seq_len * batch, hidden))
        # NOTE(review): outputs are stacked along dim=0 (rows), so x_fc must
        # supply one row per (grid, step) sample -- verify against the data
        # pipeline.
        lstm_feat = torch.cat(per_grid, dim=0)
        # type_as casts x_fc to lstm_feat's dtype *and* device, so the old
        # explicit .cuda() call here was redundant.
        x = torch.cat([lstm_feat, x_fc.type_as(lstm_feat)], dim=1)
        for layer in self.layers:
            x = layer(x)
        return x

    @staticmethod
    def train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                          grid_num, time_chunk_size, hidden_size, num_layers, fc_hideen_list,
                          learning_rate, num_epochs):
        """Train an LSTM_FC model with Adam + MSE and return predictions.

        All array arguments are numpy arrays; float32 is expected since
        torch.from_numpy preserves dtype and the model weights are float32.
        ``fc_hideen_list`` keeps its historical (misspelled) name so keyword
        callers remain compatible.

        :return: (train_predictions, test_predictions, loss_changes) --
            numpy arrays for the predictions plus the per-epoch train loss.
        """
        x_train_lstm = torch.from_numpy(x_train_lstm)
        x_train_fc = torch.from_numpy(x_train_fc)
        y_train = torch.from_numpy(y_train)
        x_test_lstm = torch.from_numpy(x_test_lstm)
        x_test_fc = torch.from_numpy(x_test_fc)
        y_test = torch.from_numpy(y_test)

        model = LSTM_FC(hidden_size, num_layers, grid_num, time_chunk_size, x_train_fc.shape[1], fc_hideen_list)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train_lstm = x_train_lstm.cuda()
            x_train_fc = x_train_fc.cuda()
            y_train = y_train.cuda()
            x_test_lstm = x_test_lstm.cuda()
            x_test_fc = x_test_fc.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            # Call the module (not .forward) so registered hooks keep working.
            lstm_train_out = model(x_train_lstm, x_train_fc)
            loss = criterion(lstm_train_out, y_train.unsqueeze(1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if epoch % 100 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            loss_changes.append(loss.item())

        model.eval()

        # Inference only: no autograd graph needed.
        with torch.no_grad():
            lstm_test_out = model(x_test_lstm, x_test_fc)
        lstm_test_out = lstm_test_out.cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes


# Variant that trains a separate combined model for each grid block
class LSTM_FC_separate(torch.nn.Module):
    """Single-LSTM variant of LSTM_FC, trained separately per grid block.

    One LSTM encodes the block's time series; its output is concatenated
    with the static features ``x_fc`` and regressed to a single value per
    row by an MLP.
    """

    def __init__(self, hidden_size, num_layers, lstm_num, time_chunk_size, in_dim, fc_hidden_list):
        """
        :param hidden_size: LSTM hidden size.
        :param num_layers: number of stacked LSTM layers.
        :param lstm_num: kept for interface parity with LSTM_FC; only the
            single LSTM is used here.
        :param time_chunk_size: LSTM input feature size per time step.
        :param in_dim: width of the extra feature vector ``x_fc``.
        :param fc_hidden_list: hidden widths of the fully-connected head.
        """
        super(LSTM_FC_separate, self).__init__()
        self.lstm_num = lstm_num
        self.lstm = torch.nn.LSTM(input_size=time_chunk_size, hidden_size=hidden_size, num_layers=num_layers)

        lstm_out_len = hidden_size

        # BUG FIX: the FC head used to live in a plain Python list, so its
        # parameters were invisible to .parameters()/.cuda() and were never
        # trained by the optimizer.  nn.ModuleList registers them properly.
        self.layers = nn.ModuleList()
        self.layers.append(nn.Sequential(nn.Linear(in_dim + lstm_out_len, fc_hidden_list[0]), nn.ReLU(True)))
        for i in range(len(fc_hidden_list) - 1):
            self.layers.append(nn.Sequential(nn.Linear(fc_hidden_list[i], fc_hidden_list[i + 1]), nn.ReLU(True)))
        # BUG FIX: the original passed nn.ReLU(True) as nn.Linear's third
        # positional argument (``bias``), which is merely truthy, so no
        # activation was ever applied to the output.  A plain linear output
        # layer reproduces that effective behavior.
        self.layers.append(nn.Linear(fc_hidden_list[-1], 1))

    def forward(self, x_lstm, x_fc):
        """Encode ``x_lstm``, merge with ``x_fc``, and apply the FC head.

        :param x_lstm: presumably a (seq, batch, time_chunk_size) tensor --
            TODO confirm layout with the caller.
        :param x_fc: (rows, in_dim) extra features, one row per flattened
            (seq, batch) sample.
        :return: (rows, 1) predictions.
        """
        out, _ = self.lstm(x_lstm)
        seq_len, batch, hidden = out.shape
        lstm_feat = out.view(seq_len * batch, hidden)

        # type_as casts x_fc to lstm_feat's dtype *and* device, so the old
        # explicit .cuda() call here was redundant.
        x = torch.cat([lstm_feat, x_fc.type_as(lstm_feat)], dim=1)
        for layer in self.layers:
            x = layer(x)
        return x

    @staticmethod
    def train_and_predict(x_train_lstm, x_train_fc, y_train, x_test_lstm, x_test_fc, y_test,
                          grid_num, time_chunk_size, hidden_size, num_layers, fc_hidden_list,
                          learning_rate, num_epochs):
        """Train an LSTM_FC_separate model with Adam + MSE and predict.

        All array arguments are numpy arrays; float32 is expected since
        torch.from_numpy preserves dtype and the model weights are float32.

        :return: (train_predictions, test_predictions, loss_changes) --
            numpy arrays for the predictions plus the per-epoch train loss.
        """
        x_train_lstm = torch.from_numpy(x_train_lstm)
        x_train_fc = torch.from_numpy(x_train_fc)
        y_train = torch.from_numpy(y_train)
        x_test_lstm = torch.from_numpy(x_test_lstm)
        x_test_fc = torch.from_numpy(x_test_fc)
        y_test = torch.from_numpy(y_test)

        model = LSTM_FC_separate(hidden_size, num_layers, grid_num, time_chunk_size, x_train_fc.shape[1], fc_hidden_list)
        if torch.cuda.is_available():
            model = model.cuda()
            x_train_lstm = x_train_lstm.cuda()
            x_train_fc = x_train_fc.cuda()
            y_train = y_train.cuda()
            x_test_lstm = x_test_lstm.cuda()
            x_test_fc = x_test_fc.cuda()
            y_test = y_test.cuda()

        criterion = nn.MSELoss()
        optimizer = optim.Adam(model.parameters(), lr=learning_rate)
        loss_changes = []

        lstm_train_out = None
        for epoch in range(num_epochs):
            # Call the module (not .forward) so registered hooks keep working.
            lstm_train_out = model(x_train_lstm, x_train_fc)
            loss = criterion(lstm_train_out, y_train.unsqueeze(1))

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            if epoch % 100 == 0:
                print('Epoch[{}/{}], loss:{:.6f}'.format(epoch + 1, num_epochs, loss.item()))
            loss_changes.append(loss.item())

        model.eval()

        # Inference only: no autograd graph needed.
        with torch.no_grad():
            lstm_test_out = model(x_test_lstm, x_test_fc)
        lstm_test_out = lstm_test_out.cpu().numpy()
        lstm_train_out = lstm_train_out.detach().cpu().numpy()

        return lstm_train_out, lstm_test_out, loss_changes
