"""
Title: Timeseries forecasting for future

"""

"""
## Setup
This example requires TensorFlow 2.3 or higher.
"""

import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras

"""
## The Data
Daily-granularity historical records of the futures contract

Index| Features      |Example            |Description
-----|---------------|-------------------|-----------------------
1    |date           |yyyy-MM-dd HH:mm:ss|trade date (日期)
2    |open           |996.52             |opening price (开)
3    |high           |-8.02              |highest price (最高)
4    |low            |265.4              |lowest price (最低)
5    |close          |-8.9               |closing price (收)
6    |volume         |93.3               |trading volume (成交量)
7    |money          |3.33               |turnover (成交额)
8    |open_interest  |3.11               |open interest (持仓量)
9    |symbol         |0.22               |instrument code (编码)

"""

# Absolute path to the daily futures bar data (M9999 contract, XDCE exchange).
# NOTE(review): machine-specific path — update before running elsewhere.
csv_path = "/Users/aloudata/Downloads/M9999-DAY.XDCE.csv"

# Load the full daily history; expected columns are listed in the table above.
df = pd.read_csv(csv_path)

"""
## Raw Data Visualization

To give us a sense of the data we are working with, each feature has been plotted below.
This shows the distinct pattern of each feature over the time period from 2009 to 2016.
It also shows where anomalies are present, which will be addressed during normalization.
"""

# Human-readable column titles, in CSV order. Includes the non-feature
# "date" and "symbol" columns.
titles = [
    "date",
    "open",
    "high",
    "low",
    "close",
    "volume",
    "money",
    "open_interest",
    "symbol",
]

# Numeric columns fed to the model: every title except the leading "date"
# and the trailing "symbol".
feature_keys = titles[1:-1]

# Column holding the timestamp of each row.
date_time_key = titles[0]

"""
## 数据处理

"""

# Fraction of rows used for training; the remainder becomes validation data.
split_fraction = 0.85
# df.shape[0] is already an int — only the product needs truncation,
# so the inner int() of the original was redundant.
train_split = int(split_fraction * df.shape[0])
step = 1  # sampling rate: use every row of the window

past = 30    # look back 30 daily bars per sample
future = 1   # predict 1 day ahead
learning_rate = 0.001
batch_size = 128
epochs = 15


def normalize(data, train_split):
    """Z-score `data` using statistics computed on the first `train_split`
    rows only, so validation data never leaks into the scaling."""
    train_slice = data[:train_split]
    mean = train_slice.mean(axis=0)
    std = train_slice.std(axis=0)
    print("mean:", mean)
    print("std:", std)
    return (data - mean) / std


# Report the columns that are actually fed to the model. `titles` also
# lists "date" and "symbol", which are not model features, so print
# `feature_keys` rather than the full title list.
print(
    "The selected parameters are:",
    ", ".join(feature_keys),
)
"""
## 所有指标参与计算

"""
# Keep only the numeric feature columns, indexed by timestamp.
features = df[feature_keys]
features.index = df[date_time_key]
features.head()  # no-op in script mode: the returned preview is discarded

# Normalize with train-split statistics only (no validation leakage).
features = normalize(features.values, train_split)
# NOTE(review): rebuilding the DataFrame drops the date index and relabels
# columns as integers 0..6 — downstream code relies on this integer labeling.
features = pd.DataFrame(features)
features.head()

# .loc on the default RangeIndex is label-inclusive: row train_split - 1 is
# the last training row, so train/val do not overlap.
train_data = features.loc[0: train_split - 1]
val_data = features.loc[train_split:]

"""
# Training dataset

The training dataset labels starts from the 792nd observation (720 + 72).
"""

# Labels sit past + future rows ahead of the first row of their input window.
start = past + future
end = start + train_split

x_train = train_data
# Integer column label 3 is "close" — the value being predicted.
y_train = features.iloc[start:end][[3]]

# Rows per input window after subsampling every `step` rows.
sequence_length = int(past / step)

"""
The `timeseries_dataset_from_array` function takes in a sequence of data-points gathered at
equal intervals, along with time series parameters such as length of the
sequences/windows, spacing between two sequence/windows, etc., to produce batches of
sub-timeseries inputs and targets sampled from the main timeseries.
"""

# Slide a window of `sequence_length` rows over the training features and
# pair each window with the label `past + future` rows ahead of its start.
dataset_train = keras.preprocessing.timeseries_dataset_from_array(
    data=x_train,
    targets=y_train,
    sequence_length=sequence_length,
    sampling_rate=step,
    batch_size=batch_size,
)

"""
## Validation dataset

The validation dataset must not contain the last 792 rows as we won't have label data for
those records, hence 792 must be subtracted from the end of the data.

The validation label dataset must start from 792 after train_split, hence we must add
past + future (792) to label_start.
"""

# Trim the trailing past + future rows: their labels lie beyond the data.
x_end = len(val_data) - past - future

# Absolute row index of the first validation label.
# NOTE(review): label_start is never used below; y_val's iloc offset
# (past + future into val_data) lands on the same row, so the two agree.
label_start = train_split + past + future

# All 7 integer-labeled feature columns, as a numpy array.
x_val = val_data.iloc[:x_end][[i for i in range(7)]].values
# Column 3 ("close") is the prediction target.
y_val = val_data.iloc[past + future:][[3]]

# Same windowing as the training pipeline, applied to the validation split.
dataset_val = keras.preprocessing.timeseries_dataset_from_array(
    data=x_val,
    targets=y_val,
    sequence_length=sequence_length,
    sampling_rate=step,
    batch_size=batch_size,
)

# Pull a single batch to inspect the tensor shapes the pipeline produces;
# `inputs` is also reused below to size the model's Input layer.
for inputs, targets in dataset_train.take(1):
    pass

print("Input shape:", inputs.numpy().shape)
print("Target shape:", targets.numpy().shape)

"""
## Training
"""

# Single-layer LSTM regressor: (batch, sequence_length, n_features) -> scalar.
# Name the symbolic layer input distinctly so it no longer rebinds `inputs`
# (the batch tensor above) on the same line that reads its shape — the
# original worked only because the RHS is evaluated first.
model_inputs = keras.layers.Input(shape=(inputs.shape[1], inputs.shape[2]))
lstm_out = keras.layers.LSTM(32)(model_inputs)
outputs = keras.layers.Dense(1)(lstm_out)

model = keras.Model(inputs=model_inputs, outputs=outputs)
model.compile(optimizer=keras.optimizers.Adam(learning_rate=learning_rate), loss="mse")
model.summary()

"""
We'll use the `ModelCheckpoint` callback to regularly save checkpoints, and
the `EarlyStopping` callback to interrupt training when the validation loss
is not longer improving.
"""

# Where the best weights (lowest validation loss) are written.
path_checkpoint = "../example/model_checkpoint.h5"

# Stop training after 5 epochs without any val_loss improvement.
es_callback = keras.callbacks.EarlyStopping(
    monitor="val_loss", min_delta=0, patience=5
)

# Persist only the weights, and only when val_loss improves.
modelckpt_callback = keras.callbacks.ModelCheckpoint(
    filepath=path_checkpoint,
    monitor="val_loss",
    save_best_only=True,
    save_weights_only=True,
    verbose=1,
)

# Train; EarlyStopping may halt before `epochs` complete, and the checkpoint
# callback keeps only the best weights (by validation loss) on disk.
history = model.fit(
    dataset_train,
    epochs=epochs,
    validation_data=dataset_val,
    callbacks=[es_callback, modelckpt_callback],
)

"""
We can visualize the loss with the function below. After one point, the loss stops
decreasing.
"""


def visualize_loss(history, title):
    """Plot training vs. validation loss per epoch from a Keras History."""
    train_loss = history.history["loss"]
    val_loss = history.history["val_loss"]
    # Local name avoids shadowing the module-level `epochs` hyperparameter.
    epoch_range = range(len(train_loss))
    plt.figure()
    plt.plot(epoch_range, train_loss, "b", label="Training loss")
    plt.plot(epoch_range, val_loss, "r", label="Validation loss")
    plt.title(title)
    plt.xlabel("Epochs")
    plt.ylabel("Loss")
    plt.legend()
    plt.show()


visualize_loss(history, "Training and Validation Loss")

"""
## Prediction

The trained model above is now able to make predictions for 5 sets of values from
validation set.
"""


def show_plot(actual, prediction):
    """Overlay the true future values and the model's predictions."""
    steps = list(range(len(actual)))
    plt.plot(steps, actual.flatten(), ".-", label="True Future")
    plt.plot(steps, prediction.flatten(), ".-", label="Model Prediction")
    plt.legend()
    # Pads the x-axis past the last point, using the module-level `future`.
    plt.xlim([steps[0], len(actual) + (future + 5) * 2])
    plt.xlabel("Time-Step")
    plt.show()


# Rebuild the validation pipeline as one full-size batch so a single
# predict() call covers every complete validation window.
test_val = keras.preprocessing.timeseries_dataset_from_array(
    data=x_val,
    targets=y_val,
    sequence_length=sequence_length,
    sampling_rate=step,
    batch_size=len(x_val),
)

# Grab the single batch for shape inspection and inference.
for testInputs, testTargets in test_val.take(1):
    pass

print("Test shape:", testInputs.numpy().shape)
print("Test shape:", testTargets.numpy().shape)
prediction = model.predict(testInputs)


# Plot predictions against ground truth over the validation window.
show_plot(testTargets.numpy(), prediction)

"""
**Example available on HuggingFace**
| Trained Model | Demo |
| :--: | :--: |
| [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Model-Time%20Series-black.svg)](https://huggingface.co/keras-io/timeseries_forecasting_for_weather) | [![Generic badge](https://img.shields.io/badge/%F0%9F%A4%97%20Spaces-Time%20Series-black.svg)](https://huggingface.co/spaces/keras-io/timeseries_forecasting_for_weather) |
"""
