# -*- coding: utf-8 -*-
import keras
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, LSTM, Dropout
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.preprocessing import minmax_scale
import matplotlib as mpl
mpl.rcParams['font.size'] = 13
# for chinese fonts display
# plt.rcParams['font.sans-serif']=['SimHei']
# plt.rcParams['axes.unicode_minus'] = False

# Set the lowest value as 0 and highest value as 1
def MinMaxScaler(data, max, min):
    """Min-max scale *data* into [0, 1] given the column max and min.

    Parameters: data (scalar or ndarray), max/min (matching scalars or
    arrays; note the parameter names shadow the builtins — kept for
    backward compatibility with existing callers).
    Returns the scaled value(s); if the range is degenerate (max == min),
    every value maps to 0.
    """
    numerator = data - min
    denominator = max - min
    # BUG FIX: the original used `denominator is 0`, an identity check that
    # is always False for floats and numpy arrays, so the guard never fired
    # and plain-float input with max == min raised ZeroDivisionError. It also
    # returned an inconsistent `(0, max, min)` tuple on that branch.
    if np.all(denominator == 0):
        return np.zeros_like(numerator, dtype=float)
    return numerator / denominator

# Return the scaled value to origin
def MinMaxReturn(data, max, min):
    """Invert min-max scaling for scaled values *data*.

    Uses element 1 of *max*/*min* (the second raw column's range), so both
    must be subscriptable (e.g. the per-column max/min arrays computed from
    the raw data). Returns data mapped back to the original value range.
    """
    span = max[1] - min[1]
    offset = min[1]
    return data * span + offset

# Input data file name (uncomment one of the alternatives below to switch datasets).

data_location = 'T73999.csv'
# data_location = 'TP15222.csv'
# data_location = 'tk718.csv'
# data_location = 'tk719.csv'
# data_location = 'tp215.csv'
# data_location = 'tk1078.csv'
# data_location = 'tp308x.csv'


# Read input data: only columns 1 and 4 of the CSV are used
# (presumably tubing pressure and pipeline pressure — TODO confirm
# against the CSV header).
raw_data = pd.read_csv(data_location, header=0, usecols=[1, 4])
# Two-step (lag-2) difference of each column; the first two NaN rows
# produced by diff() are replaced with 0.
diff_data = raw_data.diff(periods = 2).fillna(0)

# print(diff_data.max())
raw_data = raw_data.values
# Absolute differences are used as extra features; sign is discarded.
diff_data = np.abs(diff_data.values)
# print(diff_data)

# NOTE(review): `max` and `min` shadow the builtins; they are per-column
# maxima/minima of the raw data, consumed later by MinMaxReturn.
max = raw_data.max(axis=0)
min = raw_data.min(axis=0)
print("raw_data of max and min:  ", max, min)
# print(max, min)
# time_series_data1 = MinMaxScaler(raw_data, max, min)
# Column-wise scaling to [0, 1] for both the raw data and the differences.
time_series_data = minmax_scale(raw_data, axis=0)
diff_data = minmax_scale(diff_data, axis=0)
# Feature layout after hstack: cols 0-1 = scaled |diff| features,
# cols 2-3 = scaled raw columns.
time_series_data = np.hstack((diff_data, time_series_data))
print("time_series_data :\n", time_series_data)
print("time_series_data max : \n ", time_series_data.max(axis=0))


# len = raw_data.shape[0]
# Chronological (unshuffled) split: first 3200 rows train, the rest test.
train = np.array(time_series_data[0: 3200])
test = np.array(time_series_data[3200:])
# NOTE(review): `validation` is assigned here but never used below
# (the later "validation" predictions are built from xx_validation instead).
validation = np.array(time_series_data)[4800:]
# NOTE(review): x uses columns 0..3 while y is column 3, so the target is
# also present inside the input features (target leakage) — confirm this
# is intentional before trusting the reported scores.
x_train = train[:, :4]
print("x_train.shape : ", x_train.shape)
# print("\n", x_train)
y_train = train[:, 3]
print(y_train.shape)
# print("-----------------------------\n", y_train)
x_test = test[:, :4]
print("x_test.shape : ", x_test.shape)
# print("-----------------------------\n", x_test)
y_test = test[:, 3]

# Reshape inputs to (samples, timesteps=4, features=1) as expected by the
# LSTM's input_shape=(4, 1); targets become column vectors (samples, 1).
x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
# print("x_train :", x_train)
y_train = np.reshape(y_train, (y_train.shape[0], 1))
# print("y_train :", y_train)
x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
# print("x_test :", x_test)
y_test = np.reshape(y_test, (y_test.shape[0], 1))

# Create the model: three stacked 64-unit LSTM layers with dropout,
# followed by a single-unit Dense regression head.
model = Sequential()
# Each sample is a sequence of 4 timesteps with 1 feature each.
model.add(LSTM(64, input_shape=(4, 1), return_sequences=True))
model.add(Dropout(0.2))
# NOTE: input_shape is only meaningful on the first layer; Keras ignores it
# on subsequent layers, so the redundant (and misleading) kwargs were removed.
model.add(LSTM(64, return_sequences=True))
model.add(Dropout(0.2))
# Final LSTM returns only the last hidden state (return_sequences=False).
model.add(LSTM(64))
model.add(Dropout(0.2))
# Scalar regression output (predicts the scaled pressure value).
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')

# Train the model. shuffle=False keeps the chronological order of the
# time-series batches.
model.fit(x_train, y_train, epochs = 100, batch_size = 60, shuffle = False, verbose = 1)

# Evaluate the model (MSE on scaled values, per the compiled loss).
trainScore = model.evaluate(x_train, y_train, batch_size=60, verbose=0)
print('Train Score: ', trainScore)
testScore = model.evaluate(x_test, y_test, batch_size=60, verbose=0)
print('Test Score: ', testScore)

# Predict using the trained model
prediction = model.predict(x_test)

# RMSE / MAE on values mapped back to the original pressure range.
# NOTE(review): x_test[:, 3] is the scaled target feature (same values as
# y_test), so these scores compare predictions against the target that was
# also fed in as an input — confirm this is the intended ground truth.
testScore2 = np.sqrt(mean_squared_error(MinMaxReturn(x_test[:, 3], max, min), MinMaxReturn(prediction, max, min)))
# print(MinMaxReturn(x_test[:, 3], max, min), MinMaxReturn(prediction, max, min))
testScore3 = mean_absolute_error(MinMaxReturn(x_test[:, 3], max, min), MinMaxReturn(prediction, max, min))
print('Test Score2: ', testScore2)
print('Test Score3: ', testScore3)


prediction = np.reshape(prediction, (prediction.shape[0], prediction.shape[1], 1))
# 
# regroup the first dimension of test data and predictions from test data
xx_validation = np.hstack([x_test[:, :3], prediction])
# print("xx_validation : ", xx_validation)
xx_validation = np.reshape(xx_validation, (xx_validation.shape[0],
                           xx_validation.shape[1], 1))
# scaled to original values
prediction = MinMaxReturn(prediction, max, min)
# scaled to original values
validation_prediction = model.predict(xx_validation)
validation_prediction = MinMaxReturn(validation_prediction, max, min)

print("model.summary() :")
model.summary()
# 
# Plot raw data plus the two prediction series; vertical dashed lines mark
# the train/test boundary (x=3200) and the test/"validation" boundary (x=4800).
plt.figure(figsize=(8, 5))
plt.axvline(x=len(train), color='r', linestyle='--')
plt.axvline(x=len(train)+1600, color='r', linestyle='--')
# print(len(train), len(train)+1600)
plt.plot(np.arange(len(raw_data)), raw_data[:, 1], label="Raw pipeline pressure data")
plt.plot(np.arange(len(raw_data)), raw_data[:, 0], linewidth=0.5,
         color='dimgray', label = "Raw tubing pressure data")

# Pad with None so the prediction curves start at the correct x offsets
# (matplotlib skips None points).
plt.plot([None for _ in range(3200)] + [x for x in prediction[0: 1600]],
         label="Predicted values based on test data")
plt.plot([None for _ in range(4800)] +
         [x for x in validation_prediction[1600: -1]],
         label="Predicted values based on previous predictions")


# plt.plot(np.arange(len(diff_data)), diff_data[:, 0], linewidth=0.5,
#          color='lightgray', label = "first-order differential data")
# plt.plot(np.arange(len(diff_data)), diff_data[:, 1], linewidth=0.5,
#          color='dimgray', label = "second-order differential data")

plt.xlabel('Time series data points', fontsize=13)
plt.ylabel('Pipeline pressure', fontsize=13)
plt.legend(loc=2)
# NOTE(review): plt.ylim() with no arguments only queries the current limits
# and has no effect — remove or pass explicit limits.
plt.ylim()
plt.title("Time series data prediction using LSTM", fontsize=13)
# Save the figure in several formats before showing it interactively.
plt.savefig("fig_7_1.png", dpi=300)
plt.savefig("fig_7_1.svg", dpi=300)
plt.savefig("fig_7_1.pdf", dpi=300)
plt.savefig("fig_7_1.tif", dpi=300)
plt.show()


