#
#
# LSTM weather prediction demo
# Written by: Dan R 2020
#
#

#
# Core Keras libraries
#
# import seq2seq
# from seq2seq.models import SimpleSeq2Seq
import math

from tensorflow import keras
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Embedding, Attention
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import LSTM, RepeatVector, TimeDistributed
from tensorflow.keras.layers import Bidirectional

# from tensorflow.contrib import seq2seq
# import seq2seq
# from seq2seq.models import SimpleSeq2Seq

#
# For data conditioning
#
from scipy.ndimage import gaussian_filter1d
from scipy.signal import medfilt

#
# Make results reproducible
#
from numpy.random import seed

seed(1)
# from tensorflow import set_random_seed
import tensorflow as tf
import pandas as pd  # 数据分析包
import numpy as np  # 提供多维数组对象的库

tf.random.set_seed(1)

#
# Other essential libraries
#
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
from numpy import array

# Make our plot a bit formal
font = {'family': 'Arial',
        'weight': 'normal',
        'size': 10}
plt.rc('font', **font)

#
# Set input number of timestamps and training days
#
n_timestamp = 5
train_days = 1200  # number of days to train from
testing_days = 400  # number of days to be predicted
n_epochs = 20
filter_on = 1
train_model = 0
# x_or_y = 0     # 1x 0y
user_id = '001'  # 1 3 5 8 11 14 15 38

x_delta = np.zeros(testing_days - n_timestamp - 1)
y_delta = np.zeros(testing_days - n_timestamp - 1)

# seq2seq
n_in = 5
n_out = 5
encoded_length = 1
batch_size = 7


def difference(dataset, interval=1):
    """Return the lagged difference series of *dataset*.

    Element j of the result is ``dataset[j + interval] - dataset[j]``,
    so the output is ``interval`` items shorter than the input.

    Args:
        dataset: indexable sequence of numbers (or 1-element numpy rows).
        interval: lag between the subtracted samples (default 1).

    Returns:
        list of differences; empty when ``len(dataset) <= interval``.
    """
    return [dataset[i] - dataset[i - interval]
            for i in range(interval, len(dataset))]


# Invert one differencing step.
def inverse_difference(last_ob, value):
    """Undo `difference`: add the predicted delta back onto the last observation."""
    return last_ob + value


#
# Select model type
# 1: Single cell
# 2: Stacked
# 3: Bidirectional
#
model_type = 3

# Directory holding the per-user trajectory CSVs (expects 'Time', 'X', 'Y'
# columns -- TODO confirm schema against the data files).
url = "C:/Users/admin/Desktop/WeatherLSTM-master/NewYork/"
# `error_bad_lines` was deprecated in pandas 1.3 and removed in 2.0;
# `on_bad_lines='skip'` is the supported equivalent (skip malformed rows).
dataset = pd.read_csv(url + 'NewYork_30sec_' + user_id + '.csv', on_bad_lines='skip')
print(dataset)
if filter_on == 1:
    # Denoise both coordinates: a 3-point median filter removes spikes,
    # then a Gaussian (sigma=1.2) smooths what remains.
    dataset['X'] = medfilt(dataset['X'], 3)
    dataset['X'] = gaussian_filter1d(dataset['X'], 1.2)
    dataset['Y'] = medfilt(dataset['Y'], 3)
    dataset['Y'] = gaussian_filter1d(dataset['Y'], 1.2)
#
# Set number of training and testing data
#
train_set = dataset[0:train_days].reset_index(drop=True)
test_set = dataset[train_days: train_days + testing_days].reset_index(drop=True)
# Run the whole pipeline twice: x_or_y == 1 processes the X coordinate
# (CSV column 1), x_or_y == 0 processes the Y coordinate (CSV column 2).
for x_or_y in range(2):
    if x_or_y:
        # X coordinate.
        training_set = train_set.iloc[:, 1:2].values
        testing_set = test_set.iloc[:, 1:2].values
        # Model the first-order differences; the predicted deltas are added
        # back onto the raw series further below.
        diff_train = difference(training_set)
        diff_test = difference(testing_set)
        # print("diff_train", len(diff_train))
        # print("diff_test", len(diff_test))
    else:
        # Y coordinate.
        training_set = train_set.iloc[:, 2:3].values
        testing_set = test_set.iloc[:, 2:3].values
        diff_train = difference(training_set)
        diff_test = difference(testing_set)
        # print("diff_train_y", len(diff_train))
        # print("diff_test_y", len(diff_test))
    #
    #
    # Normalize data first
    #
    sc = MinMaxScaler(feature_range=(0, 1))

    # NOTE(review): fit_transform is called a second time on the test
    # differences, so `sc` ends up fitted to the *test* min/max; every
    # inverse_transform below (including for y_train) uses that test-set
    # scaling, and test statistics leak into preprocessing. Confirm this
    # is intentional before reusing the code.
    training_set_scaled = sc.fit_transform(diff_train)
    testing_set_scaled = sc.fit_transform(diff_test)
    def data_split_(sequence, pre, pre_2, pre_3, pre_4, n_timestamp, pre_num):
        """Build multi-step model inputs by splicing in earlier predictions.

        For each window start i, the sample is the last
        (n_timestamp - pre_num) true values of the window followed by one
        value from each of the first `pre_num` prediction series at
        position i -- i.e. the model's own previous forecasts replace the
        most recent true values.

        Args:
            sequence: scaled true series (sequence of 1-element rows).
            pre, pre_2, pre_3, pre_4: prediction series from the previous
                forecast rounds; pass 0 (or anything) for unused slots.
            n_timestamp: window length.
            pre_num: how many prediction series to splice in (1..4).

        Returns:
            numpy array of the assembled windows,
            shape (len(sequence) - n_timestamp, n_timestamp, ...).
        """
        # The four original branches differed only in how many prediction
        # series they appended; drive that with a slice instead.
        predictions = (pre, pre_2, pre_3, pre_4)[:pre_num]
        X = []
        for i in range(len(sequence)):
            end_ix = i + n_timestamp
            # Stop once the window would run past the last usable index.
            if end_ix > len(sequence) - 1:
                break
            seq_x = []
            # (n_timestamp - pre_num) most recent true values ...
            seq_x.extend(sequence[i + pre_num:end_ix])
            # ... then one predicted value from each earlier forecast round.
            for series in predictions:
                seq_x.extend(series[i:i + 1])
            X.append(seq_x)
        return array(X)


    def data_split_pre(sequence_old, pre, n_timestamp, xuhao, pre_end=394):
        """Split a hybrid true/predicted series into sliding windows.

        The working series keeps the first ``xuhao + n_timestamp`` true
        values of `sequence_old` and continues with the predictions
        ``pre[xuhao:pre_end]``; it is then cut into n_timestamp-long
        windows exactly like `data_split` (without targets).

        Args:
            sequence_old: scaled true series.
            pre: predicted series used from position `xuhao` onwards.
            n_timestamp: window length.
            xuhao: index ("serial number") where predictions take over.
            pre_end: end of the prediction slice; the default 394 preserves
                the original hard-coded value (testing_days - n_timestamp - 1).

        Returns:
            numpy array of the windows.

        NOTE(review): not called from this file's active code path; only
        the commented-out experiment below used the same logic inline.
        """
        sequence = []
        sequence.extend(sequence_old[0:xuhao + n_timestamp])
        sequence.extend(pre[xuhao:pre_end])
        X = []
        for i in range(len(sequence)):
            end_ix = i + n_timestamp
            if end_ix > len(sequence) - 1:
                break
            # window i..end_ix-1 is one input sample
            seq_x = sequence[i:end_ix]
            X.append(seq_x)
        return array(X)


    #
    # Split data into n_timestamp
    #
    def data_split(sequence, n_timestamp):
        """Slice *sequence* into supervised (window, next-value) pairs.

        Every run of n_timestamp consecutive elements becomes one input
        sample and the element immediately after it becomes the target.
        Returns two numpy arrays: (windows, targets); both are empty when
        the sequence is shorter than n_timestamp + 1.
        """
        windows, targets = [], []
        last_start = len(sequence) - n_timestamp - 1
        for start in range(last_start + 1):
            stop = start + n_timestamp
            windows.append(sequence[start:stop])
            targets.append(sequence[stop])
        return array(windows), array(targets)


    class Encoder(keras.Model):
        """Embedding + LSTM encoder.

        Maps token ids to a sequence of hidden features plus the final
        (h, c) LSTM state pair, which primes the decoder.
        """

        def __init__(self, vocab_size, embedding_dim, hidden_units):
            super(Encoder, self).__init__()
            # mask_zero=True: token id 0 is treated as padding downstream.
            self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
            self.encoder_lstm = LSTM(hidden_units, return_sequences=True, return_state=True, name="encode_lstm")

        def call(self, inputs):
            embedded = self.embedding(inputs)
            outputs, final_h, final_c = self.encoder_lstm(embedded)
            return outputs, final_h, final_c


    class Decoder(keras.Model):
        """Embedding + LSTM decoder with attention over the encoder outputs."""

        def __init__(self, vocab_size, embedding_dim, hidden_units):
            super(Decoder, self).__init__()
            # mask_zero=True: token id 0 is treated as padding downstream.
            self.embedding = Embedding(vocab_size, embedding_dim, mask_zero=True)
            self.decoder_lstm = LSTM(hidden_units, return_sequences=True, return_state=True, name="decode_lstm")
            self.attention = Attention()

        def call(self, enc_outputs, dec_inputs, states_inputs):
            embedded = self.embedding(dec_inputs)
            # Start decoding from the encoder's final (h, c) state.
            outputs, final_h, final_c = self.decoder_lstm(embedded, initial_state=states_inputs)
            # Decoder outputs act as the query, encoder outputs as the value.
            context = self.attention([outputs, enc_outputs])
            return context, final_h, final_c


    def Seq2Seq(maxlen, embedding_dim, hidden_units, vocab_size):
        """Assemble the attention seq2seq model.

        Returns a Keras Model that takes [encoder tokens, decoder tokens]
        and emits a per-position softmax over the vocabulary.
        """
        encoder_inputs = Input(shape=(maxlen,), name="encode_input")
        decoder_inputs = Input(shape=(None,), name="decode_input")

        # Encode the source; its final LSTM state primes the decoder.
        encoder = Encoder(vocab_size, embedding_dim, hidden_units)
        enc_outputs, enc_state_h, enc_state_c = encoder(encoder_inputs)

        # Decode with attention over the encoder's output sequence.
        decoder = Decoder(vocab_size, embedding_dim, hidden_units)
        attention_output, _, _ = decoder(
            enc_outputs, decoder_inputs, [enc_state_h, enc_state_c])

        # Project each decoder position onto vocabulary probabilities.
        dense_outputs = Dense(vocab_size, activation='softmax', name="dense")(attention_output)
        return Model(inputs=[encoder_inputs, decoder_inputs], outputs=dense_outputs)


    # Build supervised windows from the scaled difference series and add a
    # trailing feature axis for the LSTM: (samples, n_timestamp, 1).
    X_train, y_train = data_split(training_set_scaled, n_timestamp)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 1)
    # print("testing_set_scaled", testing_set_scaled)
    X_test, y_test = data_split(testing_set_scaled, n_timestamp)
    # Keep an un-reshaped copy of the test windows (currently unused).
    read_x_test = X_test
    # print("len X_test", len(X_test))
    # print(X_test)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 1)
    # print("X_train=",X_train)
    # print("X_train.shape[0]=",X_train.shape[0]) 1195
    # print("X_train.shape[1]=",X_train.shape[1]) 5

    # X_train = X_train.reshape(X_train.shape[0], X_train.shape[1], 2)

    # X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], 2)
    # Build the network selected by `model_type`. Input shape is always
    # (n_timestamp, 1): one scaled difference value per step; output is a
    # single predicted difference.
    if model_type == 1:
        # Single cell LSTM
        model = Sequential()
        # model.add(LSTM(units=50, activation='relu', input_shape=(X_train.shape[1], 2)))
        model.add(LSTM(units=256, activation='relu', input_shape=(X_train.shape[1], 1)))
        # model.add(Dense(units=2))
        model.add(Dense(units=1))
    if model_type == 2:
        # Stacked LSTM
        model = Sequential()
        # model.add(LSTM(50, activation='relu', return_sequences=True, input_shape=(X_train.shape[1], 2)))
        model.add(LSTM(128, activation='relu', return_sequences=True, input_shape=(X_train.shape[1], 1)))
        model.add(LSTM(128, activation='relu'))
        model.add(Dense(1))
    if model_type == 3:
        # Bidirectional LSTM
        model = Sequential()
        model.add(Bidirectional(LSTM(50, activation='relu'), input_shape=(X_train.shape[1], 1)))
        model.add(Dense(1))
    if model_type == 4:
        # Deep stacked LSTM with dropout.
        # NOTE(review): the next line immediately overwrites this assignment;
        # the first Sequential() is dead code.
        model = keras.models.Sequential()
        model = Sequential()  # define a stacked sequential model
        model.add(LSTM(960, input_shape=(X_train.shape[1], 1), return_sequences=True))
        model.add(keras.layers.Dropout(0.1))
        model.add(LSTM(960, return_sequences=True))
        model.add(keras.layers.Dropout(0.1))
        model.add(LSTM(960, return_sequences=False))  # return only the final step's vector, not the full sequence
        model.add(keras.layers.Dropout(0.1))
        model.add(Dense(1))
        model.add(keras.layers.Activation("relu"))
    if model_type == 5:
        # Encoder-decoder (RepeatVector) LSTM producing n_out output steps.
        model = Sequential()
        model.add(LSTM(50, input_shape=(X_train.shape[1], 1)))
        model.add(RepeatVector(n_out))
        model.add(LSTM(50, return_sequences=True))
        model.add(TimeDistributed(Dense(1)))
        # maxlen = 10
        # embedding_dim = 50
        # hidden_units = 128
        # vocab_size = 10000
        # model = Seq2Seq(maxlen, embedding_dim, hidden_units, vocab_size)
        # model = seq2seq.TrainingHelper(input=[batch_size, n_in, encoded_length], sequence_length=batch_size)
        # # seq2seq2 LSTM
        # model = Sequential()
        # model.add(LSTM(50, batch_input_shape=(batch_size, n_in, encoded_length), return_sequences=False, stateful=True))
        # model.add(RepeatVector(n_out))
        # # If the input shape is (None, 32), adding RepeatVector(n_out) makes the output (None, n_out, 32);
        # # RepeatVector sets the number of steps, not the per-step width.
        # model.add(LSTM(50, return_sequences=True, stateful=True))
        # model.add(TimeDistributed(Dense(encoded_length, activation='softmax')))
        # model.add(LSTM(50, batch_input_shape=(batch_size, 7, 1), return_sequences=True, stateful=True))
        # model.add(TimeDistributed(Dense(1, activation='softmax')))
        # # Makes each step's width encoded_length without changing the step count.
    #
    # Start training
    #
    if train_model:
        # Train from scratch and save per-user, per-coordinate weights.
        model.compile(optimizer='adam', loss='mean_squared_error')
        model.summary()
        history = model.fit(X_train, y_train, epochs=n_epochs, batch_size=32)
        loss = history.history['loss']
        epochs = range(len(loss))
        if x_or_y:
            model.save("./model/" + "lstm_model_" + user_id + "_x.h5")
        else:
            model.save("./model/" + "lstm_model_" + user_id + "_y.h5")
    else:
        # Reuse previously trained weights for this user/coordinate.
        if x_or_y:
            model = keras.models.load_model("./model/" + "lstm_model_" + user_id + "_x.h5")
        else:
            model = keras.models.load_model("./model/" + "lstm_model_" + user_id + "_y.h5")
    #
    # Get predicted data
    #
    # Recursive multi-step forecasting: each round feeds the previous
    # rounds' predictions back into the input windows (via data_split_),
    # yielding 1- through 5-step-ahead predictions.
    # print("testing_set_scaled", testing_set_scaled)
    y_predicted = model.predict(X_test)
    X_test_pre_1 = data_split_(testing_set_scaled, y_predicted, 0, 0, 0, n_timestamp, 1)
    X_test_pre_1 = X_test_pre_1.reshape(X_test_pre_1.shape[0], X_test_pre_1.shape[1], 1)
    y_predicted_2 = model.predict(X_test_pre_1)
    X_test_pre_2 = data_split_(testing_set_scaled, y_predicted, y_predicted_2, 0, 0, n_timestamp, 2)
    X_test_pre_2 = X_test_pre_2.reshape(X_test_pre_2.shape[0], X_test_pre_2.shape[1], 1)
    y_predicted_3 = model.predict(X_test_pre_2)
    X_test_pre_3 = data_split_(testing_set_scaled, y_predicted, y_predicted_2, y_predicted_3, 0, n_timestamp, 3)
    X_test_pre_3 = X_test_pre_3.reshape(X_test_pre_3.shape[0], X_test_pre_3.shape[1], 1)
    y_predicted_4 = model.predict(X_test_pre_3)
    X_test_pre_4 = data_split_(testing_set_scaled, y_predicted, y_predicted_2, y_predicted_3, y_predicted_4,
                               n_timestamp, 4)
    X_test_pre_4 = X_test_pre_4.reshape(X_test_pre_4.shape[0], X_test_pre_4.shape[1], 1)
    y_predicted_5 = model.predict(X_test_pre_4)  # 5-step-ahead prediction
    # a = np.array((394, 394, 5, 1))
    #  X_test_pre = np.array((394, 5, 1))
    # Y = []
    # for xuhao in range(394):
    #     print("xuhao=", xuhao)
    #     X = []
    #     sequence = []
    #     sequence.extend(testing_set_scaled[0:xuhao + n_timestamp])
    #     sequence.extend(y_predicted[xuhao:394])
    #     # print("sequence",sequence)
    #     for i in range(len(sequence)):
    #         end_ix = i + n_timestamp
    #         if end_ix > len(sequence) - 1:
    #             break
    #         # i to end_ix as input
    #         # end_ix as target output
    #         seq_x = sequence[i:end_ix]
    #         X.append(seq_x)
    #     Y.append(X)
    #     X_pre = array(X)
    #     X_pre = X_pre.reshape(X_pre.shape[0], X_pre.shape[1], 1)
    #     y_predicted = model.predict(X_pre)
    # Y_array = array(Y)
    #
    # 'De-normalize' the data
    #

    # Invert the MinMax scaling back to difference units. Note that `sc`
    # was last fitted on the *test* differences, so y_train_descaled is
    # also inverted with the test-set min/max (see NOTE at the fit site).
    y_predicted_descaled = sc.inverse_transform(y_predicted)
    y_predicted_descaled_2 = sc.inverse_transform(y_predicted_2)
    y_predicted_descaled_3 = sc.inverse_transform(y_predicted_3)
    y_predicted_descaled_4 = sc.inverse_transform(y_predicted_4)
    y_predicted_descaled_5 = sc.inverse_transform(y_predicted_5)
    y_train_descaled = sc.inverse_transform(y_train)
    y_test_descaled = sc.inverse_transform(y_test)
    # print("len y_predicted_descaled",len(y_predicted_descaled))
    # print("len y_test_descaled",len(y_test_descaled))
    # print("y_predicted_descaled",y_predicted_descaled)
    # print("y_test_descaled",y_test_descaled)
    # The model predicts differences; add each predicted delta back onto the
    # raw coordinate at the window end to recover absolute positions.
    for i in range(len(y_predicted_descaled)):
        y_predicted_descaled[i] = y_predicted_descaled[i] + testing_set[n_timestamp + i]
        y_predicted_descaled_2[i] = y_predicted_descaled_2[i] + testing_set[n_timestamp + i]
        y_predicted_descaled_3[i] = y_predicted_descaled_3[i] + testing_set[n_timestamp + i]
        y_predicted_descaled_4[i] = y_predicted_descaled_4[i] + testing_set[n_timestamp + i]
        y_predicted_descaled_5[i] = y_predicted_descaled_5[i] + testing_set[n_timestamp + i]
        y_test_descaled[i] = y_test_descaled[i] + testing_set[n_timestamp + i]
    # print("y_predicted_descaled-y_test_descaled", y_predicted_descaled - y_test_descaled)
    # print("pre-true",y_predicted_descaled-y_test_descaled)
    # Rounded scaled predictions; computed but not used further below.
    y_pred = y_predicted.ravel()
    y_pred = [round(yx, 2) for yx in y_pred]
    y_tested = y_test.ravel()
    # Heuristic: if the true position did not move for two consecutive
    # steps, assume it stays put and override every look-ahead prediction
    # with the last observed value.
    for i in range(testing_days - n_timestamp - 1):
        if i >= 2:
            if y_test_descaled[i - 2] == y_test_descaled[i - 1]:
                y_predicted_descaled[i] = y_test_descaled[i - 1]
                y_predicted_descaled_2[i] = y_test_descaled[i - 1]
                y_predicted_descaled_3[i] = y_test_descaled[i - 1]
                y_predicted_descaled_4[i] = y_test_descaled[i - 1]
                y_predicted_descaled_5[i] = y_test_descaled[i - 1]
    # Average multi-step prediction error per test position.
    # BUG FIX: the 3rd..5th absolute-error terms were written as a separate
    # expression statement (computed and discarded), so `advange` only
    # summed the first two terms while still being divided by 5. All five
    # terms are now summed in one expression, consistent with the
    # `5 * abs(...)` fallback used near the end of the series.
    predict_sum = 0
    for i in range(testing_days - n_timestamp - 1):
        if i < 390:
            # All five look-ahead predictions have a matching true value here.
            advange = (abs(y_predicted_descaled[i] - y_test_descaled[i])
                       + abs(y_predicted_descaled_2[i] - y_test_descaled[i + 1])
                       + abs(y_predicted_descaled_3[i] - y_test_descaled[i + 2])
                       + abs(y_predicted_descaled_4[i] - y_test_descaled[i + 3])
                       + abs(y_predicted_descaled_5[i] - y_test_descaled[i + 4]))
        else:
            # Near the end there is no i+4 ground truth; weight the 1-step
            # error by 5 so that `advange / 5` stays comparable.
            advange = 5 * abs(y_predicted_descaled[i] - y_test_descaled[i])
        # predict_sum = predict_sum + abs(y_predicted_descaled[i] - y_test_descaled[i])
        predict_sum = predict_sum + advange / 5
        if x_or_y:
            # x_delta[i] = abs(y_predicted_descaled[i] - y_test_descaled[i])
            x_delta[i] = advange / 5  # per-position mean error, X coordinate
        else:
            # y_delta[i] = abs(y_predicted_descaled[i] - y_test_descaled[i])
            y_delta[i] = advange / 5  # per-position mean error, Y coordinate
    # Mean per-position error over the whole test run.
    predict_delta_advange = predict_sum / (testing_days - n_timestamp - 1)
    if x_or_y:
        print("predict_delta_advange_x", predict_delta_advange)
        # Stash this pass's series under coordinate-specific names for the
        # (commented-out) plotting section below.
        y_test_descaled_x = y_test_descaled
        y_predicted_descaled_x = y_predicted_descaled
        y_predicted_descaled_x_2 = y_predicted_descaled_2
        y_predicted_descaled_x_3 = y_predicted_descaled_3
        y_predicted_descaled_x_4 = y_predicted_descaled_4
        y_predicted_descaled_x_5 = y_predicted_descaled_5
    else:
        print("predict_delta_advange_y", predict_delta_advange)
        y_test_descaled_y = y_test_descaled
        y_predicted_descaled_y = y_predicted_descaled
        y_predicted_descaled_y_2 = y_predicted_descaled_2
        y_predicted_descaled_y_3 = y_predicted_descaled_3
        y_predicted_descaled_y_4 = y_predicted_descaled_4
        y_predicted_descaled_y_5 = y_predicted_descaled_5
    # print("len y_test_descaled", len(y_test_descaled))
    # print("y_test_descaled", y_test_descaled)
# Combine the per-coordinate errors from both loop passes into a single
# Euclidean (straight-line) distance error per test position.
predict_sum_xy = 0
predict_right = 0
for i in range(testing_days - n_timestamp - 1):
    predict_sum_xy = predict_sum_xy + np.sqrt(x_delta[i] ** 2 + y_delta[i] ** 2)
    # Count a prediction as "correct" when the position error is within 50
    # (presumably the CSV's coordinate units -- confirm against the data).
    if np.sqrt(x_delta[i] ** 2 + y_delta[i] ** 2) < 50:
        predict_right = predict_right + 1
predict_delta_advange_xy = predict_sum_xy / (testing_days - n_timestamp - 1)
print("predict_delta_advange_xy", predict_delta_advange_xy)
# print("predict_right", predict_right)
# Prints the hit rate as a percentage ("预测精准度是" = "prediction accuracy is").
print("预测精准度是", round(predict_right / (testing_days - n_timestamp - 1), 3) * 100, "%")

"""
#
# Show results
#
plt.figure(figsize=(8, 7))

plt.subplot(3, 1, 1)
plt.plot(dataset['X'],dataset['Y'], color='black', linewidth=1, label='True value')
plt.ylabel("Y")
plt.xlabel("X")
plt.title("All data")

# plt.subplot(3, 2, 3)
# plt.plot(y_test_descaled, color='black', linewidth=1, label='True value')
# plt.plot(y_predicted_descaled, color='red', linewidth=1, label='Predicted')
# plt.legend(frameon=False)
# plt.ylabel("X")
# plt.xlabel("Time")
# plt.title("Predicted data (n Times)")

plt.subplot(3, 2, 3)
plt.plot(y_test_descaled_x,y_test_descaled_y, color='black', linewidth=1, label='True value')
plt.plot(y_predicted_descaled_x,y_predicted_descaled_y,color='red', linewidth=1, label='Predicted')
plt.legend(frameon=False)
plt.ylabel("X")
plt.xlabel("Y")
plt.title("Predicted location")

plt.subplot(3, 2, 4)
plt.plot(y_test_descaled_x, color='black', linewidth=1, label='True value')
plt.plot(y_predicted_descaled_x, color='red', label='Predicted')
plt.legend(frameon=False)
plt.ylabel("X")
plt.xlabel("Time")
plt.title("Predicted data (first 400 Times)")
# plt.subplot(3, 2, 4)
# plt.plot(y_test_descaled_x[0:75],y_test_descaled_y[0:75], color='black', linewidth=1, label='True value')
# plt.plot(y_predicted_descaled_x[0:75],y_predicted_descaled_y[0:75], color='red', label='Predicted')
# plt.legend(frameon=False)
# plt.ylabel("X")
# plt.xlabel("Time")
# plt.title("Predicted data (first 75 Times)")

# plt.subplot(3, 3, 7)
# plt.plot(dataset['Y'], color='black', linewidth=1, label='True value')
# plt.ylabel("Y")
# plt.xlabel("Time")
# plt.title("All data")
# plt.subplot(3, 3, 7)
# plt.plot(epochs, loss, color='black')
# plt.ylabel("Loss (MSE)")
# plt.xlabel("Epoch")
# plt.title("Training curve")
plt.subplot(3, 3, 7)
plt.plot(y_test_descaled_y, color='black', linewidth=1, label='True value')
plt.plot(y_predicted_descaled_y, color='red', label='Predicted')
plt.legend(frameon=False)
plt.ylabel("Y")
plt.xlabel("Time")
plt.title("Predicted data (first 400 Times)")

plt.subplot(3, 3, 8)
plt.plot(y_test_descaled_x - y_predicted_descaled_x, color='black')
plt.plot(y_test_descaled_y - y_predicted_descaled_y, color='red')
plt.ylabel("Residual")
plt.xlabel("Time")
plt.title("Residual plot")

# plt.subplot(3, 3, 9)
# plt.scatter(y_predicted_descaled, y_test_descaled, s=2, color='black')
# plt.ylabel("Y true")
# plt.xlabel("Y predicted")
# plt.title("Scatter plot")

plt.subplots_adjust(hspace=0.5, wspace=0.3)
plt.show()

mse = mean_squared_error(y_test_descaled, y_predicted_descaled)
r2 = r2_score(y_test_descaled, y_predicted_descaled)
print("mse=" + str(round(mse, 2)))
print("r2=" + str(round(r2, 2)))
"""

# other pre

import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from pylab import mpl


def linear_trend(x, y):
    """Fit an ordinary least-squares trend line and return its predictions.

    Args:
        x: 2-D feature array / single-column DataFrame (e.g. the time index).
        y: 2-D target array / single-column DataFrame.

    Returns:
        ndarray of the fitted trend evaluated at every row of *x*.
    """
    lreg = LinearRegression()
    lreg.fit(x, y)
    # The rounded slope/intercept previously computed here were only used
    # by a commented-out plot title (yt = intercept + coef * t) and have
    # been dropped as unused locals.
    return lreg.predict(x)


# Linear-regression baselines: straight-line trend of each coordinate over
# time, available for comparison against the LSTM forecasts.
pre_linear_x = linear_trend(dataset[['Time']], dataset[['X']])
pre_linear_y = linear_trend(dataset[['Time']], dataset[['Y']])
