import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import os
from torch.utils.data import TensorDataset, DataLoader
from tensorboardX import SummaryWriter
import matplotlib.pyplot as plt
import matplotlib

# Workaround for the "OMP: Error #15" crash when two OpenMP runtimes get
# loaded into the same process (a known issue with torch/MKL stacks).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
summaryWriter = SummaryWriter("./runs/")  # TensorBoard event writer (tensorboardX)
pic_idx = 0  # global step counter for the loss curve logged in train()
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Training hyperparameters.
BATCH_SIZE = 64
EPOCH = 300  # number of full passes over the training set
LR = 0.02  # Adam learning rate

def train(model, device, train_loader, epoch, optimizer, lossFunction):
    """Run one training epoch over ``train_loader``.

    Logs every batch loss to TensorBoard via the module-level
    ``summaryWriter``/``pic_idx`` counter, and prints progress every
    third batch on every 100th epoch.
    """
    global pic_idx
    model.train()
    for step, batch in enumerate(train_loader):
        inputs, targets = (t.to(device) for t in batch)

        predictions = model(inputs)
        batch_loss = lossFunction(predictions, targets)

        # Standard backprop step.
        optimizer.zero_grad()
        batch_loss.backward()
        optimizer.step()

        # One TensorBoard point per batch, indexed by the global counter.
        summaryWriter.add_scalar("my_lstm", batch_loss.item(), pic_idx)
        pic_idx += 1

        # Throttled console progress report.
        if step % 3 == 0 and epoch % 100 == 0:
            print("epoch:{},step:{},loss:{}".format(epoch, step, batch_loss.item()))


def test(model, device, test_loader, lossFunction):
    model.eval()
    all_loss = 0
    out_result = torch.tensor([]).to(device)
    with torch.no_grad():
        for idx, (data, label), in enumerate(test_loader):
            data, label = data.to(device), label.to(device)
            output = model(data)
            out_result = torch.cat((out_result, output.flatten()))
            loss = lossFunction(output, label)
            all_loss += loss.item()
    all_loss /= len(test_loader.dataset)
    print("model_test loss:{}".format(all_loss))
    return out_result


def data_load():
    """Read features/targets from CSV and build train/test DataLoaders.

    The split is 80% train / 20% test, taken in file order. Returns
    ``(train_loader, test_loader)``.
    """
    features = np.array(pd.read_csv("./LSTM_pytorch/Data_x.csv", header=None)).astype('float32')
    targets = np.array(pd.read_csv("./LSTM_pytorch/Data_y.csv", header=None)).astype('float32')

    # 80% of the data is used for training.
    split = int(0.8 * len(features))

    # NOTE(review): rows 0-4 are excluded from the training split in the
    # original code — presumably a warm-up window; confirm it is intentional.
    train_set = TensorDataset(torch.from_numpy(features[5:split]),
                              torch.from_numpy(targets[5:split]))
    test_set = TensorDataset(torch.from_numpy(features[split:]),
                             torch.from_numpy(targets[split:]))

    train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=True)
    test_loader = DataLoader(test_set, batch_size=BATCH_SIZE, shuffle=True)
    return train_loader, test_loader


class my_lstm(nn.Module):
    """A single LSTM stack followed by a linear read-out layer.

    Maps an input sequence of ``input_size``-dim features to
    ``output_size``-dim outputs, one per time step.
    """

    # NOTE(review): `num_laters` is presumably a typo for `num_layers`;
    # kept as-is so existing keyword callers are not broken.
    def __init__(self, input_size, hidden_size=1, output_size=1, num_laters=1):
        super().__init__()
        self.lstm_layer = nn.LSTM(input_size=input_size,
                                  hidden_size=hidden_size,
                                  num_layers=num_laters)
        self.fc = nn.Linear(hidden_size, output_size)

    def forward(self, x):
        # Use the full hidden-state sequence; discard (h_n, c_n).
        hidden_seq, _ = self.lstm_layer(x)
        return self.fc(hidden_seq)


if __name__ == '__main__':
    train_data, test_data = data_load()
    # BUG FIX: the module defines lowercase `device`; the original used
    # `DEVICE`, which raised NameError at runtime.
    model = my_lstm(len(train_data.dataset[0][0])).to(device)
    lossFunction = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=LR)
    for epoch in range(EPOCH):
        train(model, device, train_data, epoch, optimizer, lossFunction)
    test_pre_label = test(model, device, test_data, lossFunction)

    summaryWriter.close()

    # Plot ground truth vs. predictions over the whole test split.
    # BUG FIX: the x-axis and ground-truth curve were hard-coded to 83
    # points while all predictions were plotted, which crashes with a
    # length mismatch unless the test set has exactly 83 samples.
    n = test_pre_label.numel()
    t = torch.arange(n, dtype=torch.float32).numpy()
    plt.figure()
    plt.plot(t, test_data.dataset[0:n][1].view(-1).data.numpy(),
             'b', label='y_test')
    plt.plot(t, test_pre_label.cpu().data.numpy(), 'y--',
             label='pre_test')
    plt.xlabel('t')
    plt.ylabel('Vce')
    plt.legend()  # without this the label= kwargs above are never shown
    plt.show()

    torch.save(model.state_dict(), 'my_lstm.pt')
