import torch
import os
from torch.utils.data import DataLoader
import numpy as np
from LoadData import DataSet

# Model/training hyperparameters.
# NOTE(review): none of these are read anywhere in this script — presumably
# they mirror the training code's settings; confirm before changing/removing.
input_size = 5
output_size = 3
learning_rate = 0.00001
EPOCH = 10

# Traverse the specified directory and collect the full path of every entry in it.
def eachFile(filepath):
    """Return the full paths of all entries directly under *filepath*.

    Fix: the original built paths with ``os.path.join('%s%s' % (filepath,
    allDir))`` — a single pre-concatenated argument — so a directory path
    without a trailing separator produced broken paths like ``dirfile``.
    Joining the two parts properly works either way and is backward
    compatible with the trailing-'/' directories used by this script.
    """
    result = []
    for allDir in os.listdir(filepath):
        result.append(os.path.join(filepath, allDir))
    return result

# Read the contents of the file and save it.
def readFile(filename,x,y):
    """Read numeric rows from *filename*, appending them to *x* and *y*.

    Each non-blank line is split on whitespace; all but the last three
    values become one feature row appended to *x*, the last three become
    the matching target row appended to *y* (both as numpy arrays).

    Args:
        filename: path of a text file with space-separated numbers.
        x: list receiving the feature rows (mutated in place).
        y: list receiving the target rows (mutated in place).

    Returns:
        The same (x, y) lists, for chaining convenience.

    Improvements over the original: the file is opened with a ``with``
    block so it is closed even if ``float()`` raises; ``split()`` (any
    whitespace) replaces ``split(" ")``, which crashed on double spaces
    via ``float('')``; blank lines are skipped instead of raising.
    """
    with open(filename, 'r') as fopen:
        for eachLine in fopen:
            fields = eachLine.split()
            if not fields:
                # skip empty/whitespace-only lines (e.g. trailing newline)
                continue
            values = [float(v) for v in fields]
            x.append(np.array(values[:-3]))
            y.append(np.array(values[-3:]))
    return x,y

def getData(train_files_dic,test_files_dic):
    """Load every data file under the train and test directories.

    Args:
        train_files_dic: directory containing the training text files.
        test_files_dic: directory containing the test text files.

    Returns:
        (train_data_x, train_data_y, test_data_x, test_data_y) as numpy
        arrays stacking the rows of all files in each directory.

    The original duplicated the same load loop for train and test; the
    shared logic now lives in one private helper.
    """
    def _load_split(files_dic):
        # Read every file in the directory into one pair of row lists,
        # then stack them into arrays.
        xs, ys = [], []
        for path in eachFile(files_dic):
            xs, ys = readFile(path, xs, ys)
        return np.array(xs), np.array(ys)

    train_data_x, train_data_y = _load_split(train_files_dic)
    test_data_x, test_data_y = _load_split(test_files_dic)
    return train_data_x,train_data_y,test_data_x,test_data_y

# Normalize data
def normalizedData(train_data_x,train_data_y,test_data_x,test_data_y):
    """Scale each column of x and y by the column maximum over both splits.

    All four arrays are modified IN PLACE (they must be float arrays) and
    also returned. The same per-column divisor is used for the train and
    test split so both live on the same scale.

    Improvements over the original: the per-column Python loops are
    replaced by vectorized numpy operations, and an all-zero column no
    longer triggers a division by zero (which produced NaN rows); such
    columns are left unchanged by dividing by 1 instead.
    """
    def _scale_columns(train_arr, test_arr):
        # Column-wise maximum across both splits — same divisor everywhere.
        max_vals = np.maximum(train_arr.max(axis=0), test_arr.max(axis=0))
        # Guard: a zero maximum would make the division produce NaN/inf.
        max_vals = np.where(max_vals == 0, 1.0, max_vals)
        train_arr /= max_vals
        test_arr /= max_vals

    _scale_columns(train_data_x, test_data_x)
    _scale_columns(train_data_y, test_data_y)
    return train_data_x,train_data_y,test_data_x,test_data_y


if __name__ == '__main__':

    # Directories holding all train and test data files. The trailing '/'
    # matters: eachFile builds paths by plain string concatenation.
    train_files_dic = "./data/traintxt/"
    test_files_dic = "./data/testtxt/"

    # Load every sample file from both directories into numpy arrays.
    train_data_x,train_data_y,test_data_x,test_data_y = getData(train_files_dic,test_files_dic)

    # Print the size of train data and test data
    print("Train data and test data shape:")
    print(train_data_x.shape)
    print(train_data_y.shape)
    print(test_data_x.shape)
    print(test_data_y.shape)

    # Scale each column by the shared column maximum over both splits.
    train_data_x, train_data_y, test_data_x, test_data_y = normalizedData(train_data_x,train_data_y,test_data_x,test_data_y)

    # Convert to float32 torch tensors for the model.
    train_data_x = torch.Tensor(train_data_x)
    train_data_y = torch.Tensor(train_data_y)
    test_data_x = torch.Tensor(test_data_x)
    test_data_y = torch.Tensor(test_data_y)

    # One sample per batch, in file order (shuffle=False keeps it deterministic).
    trainset = DataSet(train_data_x,train_data_y)
    trainloader = DataLoader(trainset, batch_size=1, shuffle=False)

    # Load the previously trained LSTM model. NOTE(review): this loads a
    # whole pickled module, so the model class must be importable here.
    lstm = torch.load('model.pth')

    # Accumulate the per-sample relative L1 error over the training set.
    loss = 0
    for tx, ty in trainloader:
        # Adds a leading dimension before the DataLoader's batch dim —
        # presumably the shape the model was trained with; TODO confirm
        # against the model definition.
        output = lstm(torch.unsqueeze(tx, dim=0))
        # Drop the extra leading dimension so output lines up with ty.
        output = torch.reshape(output,[output.shape[1],output.shape[2]])
        # Relative error for this sample: sum|pred - target| / sum(target).
        loss += torch.sum(torch.abs(output-ty))/torch.sum(ty)

    # Average over all samples; "误差率" means "error rate".
    loss/=len(trainloader)
    print("误差率: "+str(float(loss*100))[:8]+"%")
