# -*- encoding: utf-8 -*-
'''
@File    :   deep_ar.py
@Time    :   2021/12/15 11:57
@Author  :   ZhangChaoYang
@Desc    :   DeepAR模型 https://arxiv.org/abs/1704.04110
'''

import os
import random
import sys

import matplotlib.pyplot as plt

sys.path.insert(0, os.getcwd())
import numpy as np
import math
import tensorflow as tf
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.layers import Dense, LSTMCell, RNN
from models.losses import log_gaussian_loss
from tensorflow_probability.python.distributions.normal import Normal
import tensorflow_addons as tfa
from tensorflow.keras.callbacks import History
from util.view import draw_train_history
from util.work_flow import train_test_split
import pandas as pd
from data_process import xjtu_reader

class DeepAR(Model):
    """DeepAR probabilistic forecaster (https://arxiv.org/abs/1704.04110).

    A single-layer LSTM followed by two dense heads that parameterize a
    Gaussian (mu, sigma) for every time step. Training maximizes the
    Gaussian log-likelihood of the next observation (teacher forcing);
    forecasting feeds sampled values back in autoregressively.
    """

    def __init__(self, hidden_units, feature_num):
        """
        :param hidden_units: LSTM hidden-state size
        :param feature_num: dimensionality of each time step (mu/sigma have the same size)
        """
        super(DeepAR, self).__init__()
        # A single shared cell: the same weights are applied at every position.
        self.rnn_cell = LSTMCell(units=hidden_units)
        # The paper uses a stacked RNN; a single layer is used here.
        self.rnn = RNN(self.rnn_cell, return_sequences=True, return_state=True)
        self.dense_mu = Dense(units=feature_num, activation=None, use_bias=True)
        # softplus(x) = log(exp(x) + 1) keeps the standard deviation positive.
        self.dense_sigma = Dense(units=feature_num, use_bias=True,
                                 activation=tf.nn.softplus)

    def call(self, inputs, training=None, mask=None):
        """Run the RNN over the whole sequence and return per-step Gaussian parameters.

        :param inputs: tensor of shape (batch, time, feature_num) — assumed, TODO confirm
        :return: tuple (mu, sigma), each shaped like the RNN output projected to feature_num
        """
        whole_seq_output, final_memory_state, final_carry_state = self.rnn(inputs)
        mu = self.dense_mu(whole_seq_output)
        sigma = self.dense_sigma(whole_seq_output)
        return (mu, sigma)

    def predict_future(self, inputs, pred_len):
        """Autoregressively roll the model forward for ``pred_len`` steps.

        Conditions on ``inputs`` (the condition range) to obtain the final
        hidden/cell state, then repeatedly: run one cell step, predict
        (mu, sigma), sample the next value from Normal(mu, sigma), and feed
        the sample back in as the next input (ancestral sampling).

        :param inputs: batch of condition sequences
        :param pred_len: number of future steps to generate
        :return: (mu_list, sigma_list, z_tilde_list), each a list of
                 ``pred_len`` tensors
        """
        # Encode the condition range. Fix: reuse self.rnn (same cell, also
        # returns the final states) instead of constructing a fresh RNN layer
        # on every call as the original did.
        _, stat_h, stat_c = self.rnn(inputs)  # hidden/cell state at the last conditioned step

        z_tilde = inputs[:, -1, :]  # the last observed input seeds the recursion
        mu_list = []
        sigma_list = []
        z_tilde_list = []
        for _ in range(pred_len):
            _, (stat_h, stat_c) = self.rnn_cell(inputs=z_tilde, states=[stat_h, stat_c])
            mu = self.dense_mu(stat_h)
            sigma = self.dense_sigma(stat_h)
            z_tilde = Normal(loc=mu, scale=sigma).sample()  # draw the next value
            # tf.identity snapshots the tensor; on a variable it is equivalent
            # to <variable>.read_value()
            mu_list.append(tf.identity(mu))
            sigma_list.append(tf.identity(sigma))
            z_tilde_list.append(tf.identity(z_tilde))
        return mu_list, sigma_list, z_tilde_list

    def train(self, X_train, X_test, batch_size, epochs, learning_rate, chart_file=""):
        """Teacher-forced maximum-likelihood training with simple early stopping.

        :param X_train: array (n, seq_len, feature_num); shuffled IN PLACE each epoch
        :param X_test: held-out array with the same layout, used for val_loss
        :param batch_size: mini-batch size
        :param epochs: maximum number of epochs
        :param learning_rate: AdamW learning rate (weight decay fixed at 1e-4)
        :param chart_file: unused here; kept for interface compatibility
        :return: a keras History object with "loss" and "val_loss" lists
        """
        optimizer = tfa.optimizers.AdamW(learning_rate=learning_rate, weight_decay=1e-4)
        n = X_train.shape[0]
        min_delta = 0.001  # a loss change below this counts as "no progress"
        patience = 3       # stop after this many consecutive no-progress epochs
        prev_loss = None   # fix: None sentinel instead of 0, so a genuine loss of 0 is handled
        successive = 0
        train_losses = []
        test_losses = []
        for epoch in range(epochs):
            np.random.shuffle(X_train)  # reshuffle the training samples every epoch
            for begin in range(0, n, batch_size):
                end = min(begin + batch_size, n)
                x = X_train[begin:end]
                input_seq = x[:, :-1, :]  # inputs and targets are shifted by one step
                output_seq = x[:, 1:, :]
                with tf.GradientTape() as tape:
                    mu, sigma = self(input_seq)  # forward pass
                    loss = tf.reduce_mean(log_gaussian_loss(mu, sigma, output_seq),
                                          axis=0)
                # Fix: gradient computation and the optimizer step moved OUTSIDE
                # the tape context so the tape does not record them.
                grads = tape.gradient(loss, self.trainable_variables)
                optimizer.apply_gradients(zip(grads, self.trainable_variables))
            # NOTE(review): this is the loss of the LAST batch only, not the epoch
            # mean — kept as-is to preserve the original training curves.
            train_loss = np.mean(loss)
            train_losses.append(train_loss)

            # Validation on the full test split with the same one-step-shift targets.
            input_seq = X_test[:, :-1, :]
            output_seq = X_test[:, 1:, :]
            mu, sigma = self(input_seq)  # forward pass
            loss = tf.reduce_mean(log_gaussian_loss(mu, sigma, output_seq), axis=0)
            test_loss = np.mean(loss)
            print("epoch {} loss {} val_loss {}".format(epoch, train_loss, test_loss), flush=True)
            test_losses.append(test_loss)

            # Early stopping on the training loss.
            if prev_loss is not None:
                if math.fabs(prev_loss - train_loss) < min_delta:
                    successive += 1
                    if successive >= patience:
                        break
                else:
                    successive = 0
            prev_loss = train_loss

        history = History()
        history.history = {"loss": train_losses, "val_loss": test_losses}
        return history


def predict_future(ar_model, inputs, pred_len, quantile=(0.2, 0.5, 0.8), sample=300):
    """Monte Carlo forecast: sample many trajectories and report quantiles.

    :param ar_model: a DeepAR model (anything exposing a compatible
        ``predict_future(inputs, pred_len)`` method)
    :param inputs: a batch of condition sequences; batched prediction is
        supported and the sequence length is unrestricted
    :param pred_len: number of future steps to predict (no upper limit)
    :param quantile: quantiles to compute over the Monte Carlo samples
        (fix: tuple default instead of a shared mutable list)
    :param sample: how many trajectories to sample; more samples give more
        accurate quantile estimates
    :return: shape = (num_quantiles, pred_len, batch, per-step feature dim)
    """
    y_pred_list = []
    for _ in range(sample):  # sample repeatedly, then take quantiles
        _, _, z_tilde_list = ar_model.predict_future(inputs, pred_len)
        y_pred_list.append(z_tilde_list)
    return np.quantile(y_pred_list, quantile, axis=0)


def trend(lst, n_pairs=30):
    """Estimate the overall relative trend of a sequence by random pair sampling.

    Draws ``n_pairs`` random index pairs (begin < end) and averages the
    relative change ``(lst[end] - lst[begin]) / lst[begin]`` — a cheap check
    for a clear overall upward or downward movement.

    :param lst: sequence of numeric values; sampled ``lst[begin]`` must be
        non-zero or a ZeroDivisionError is raised
    :param n_pairs: number of pairs to sample (generalized from the original
        hard-coded 30); more pairs give a more stable estimate
    :return: mean relative change (> 0 up-trend, < 0 down-trend), or 0 when
        the sequence is too short (< 10 points) to judge
    """
    length = len(lst)
    if length < 10:
        return 0
    total = 0.0
    for _ in range(n_pairs):
        begin = random.randint(0, length - 2)  # randint bounds are inclusive
        end = random.randint(begin + 1, length - 1)
        total += (lst[end] - lst[begin]) / lst[begin]
    return total / n_pairs


def load_seq(dir, wcs, feature_indexes, window_size, stride):
    """Build an array of fixed-length windows from the .npy files under each
    working-condition directory.

    Windows whose first feature shows a clear trend (|relative change| > 0.1)
    are always kept; flat windows are kept with probability 0.1 so the sample
    is not entirely trend-dominated.

    :param dir: root directory containing one sub-directory per working condition
    :param wcs: working-condition sub-directory names
    :param feature_indexes: column indexes to keep from each file
    :param window_size: rows per window
    :param stride: step between consecutive window starts
    :return: np.ndarray of shape (num_windows, window_size, len(feature_indexes))
    """
    samples = []
    for wc in wcs:
        wc_path = os.path.join(dir, wc)
        for file in os.listdir(wc_path):
            if not file.endswith(".npy"):
                continue
            x = np.load(os.path.join(wc_path, file))
            x = x[:, feature_indexes]  # keep only the requested feature columns
            total_rows = x.shape[0]
            for begin in range(0, total_rows, stride):
                end = begin + window_size
                if end > total_rows:
                    break
                window = x[begin:end, :]
                change_ratio = trend(window[:, 0].tolist())
                if math.fabs(change_ratio) > 0.1:
                    # clear movement in the leading feature: always keep
                    samples.append(window)
                elif random.random() < 0.1:
                    # flat window: keep a small fraction as samples
                    samples.append(window)
    return np.asarray(samples)


def draw_line(x):
    """Plot every column of the 2-D array ``x`` as a line and show the figure."""
    plt.subplots(1, 1)
    n_rows, n_cols = x.shape
    xs = range(n_rows)
    for col in range(n_cols):
        plt.plot(xs, x[:, col].tolist())
    plt.show()


def observe():
    """Eyeball the corpus: load windowed sequences and print the resulting shape."""
    corpus = "xjtu"
    data_dir = os.path.join("data", "corpus", "1d", corpus, "stat")
    feature_indexes = [5]  # index 5 is the RMS value, placed first
    window_size = 30
    data = load_seq(data_dir, xjtu_reader.WC, feature_indexes,
                    window_size, window_size // 10)
    print(data.shape)


if __name__ == '__main__':
    # Train a DeepAR model on XJTU bearing data, save it, then visually check
    # probabilistic forecasts on a hand-picked validation subset.
    model_name = "deep_ar"
    corpus = "xjtu"
    wc_index = [0]
    all_wc = xjtu_reader.WC
    wcs = [all_wc[index] for index in wc_index]
    data_dim = "1d"
    data_trans = "stat"
    feature_indexes = [5, 4, 6, 7, 14, 15]  # index 5 is the RMS value, placed first; see the list returned by util.signal_feature.get_time_feature()
    batch_size = 32
    seq_length = 30  # total sequence length is 30
    pred_len = 10  # prediction range is 10; condition range is seq_length - pred_len
    epochs = 100
    learning_rate = 1e-4

    chart_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name + "_train_history.png")
    check_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name + "_check.png")
    model_file = os.path.join("data", "model", data_dim, corpus, data_trans, model_name)

    # NOTE(review): this path omits the leading "data" directory that observe()
    # uses ("data/corpus/...") — confirm which location is correct.
    x = load_seq(os.path.join("corpus", data_dim, corpus, data_trans), wcs, feature_indexes, seq_length,
                 seq_length // 10)
    print("total corpus", x.shape)  # total corpus (67235, 30, 4)

    X_train, X_test = train_test_split(x)
    ar_model = DeepAR(hidden_units=int(len(feature_indexes) * 1.2), feature_num=len(feature_indexes))
    history = ar_model.train(X_train, X_test, batch_size, epochs, learning_rate, chart_file=chart_file)
    ar_model.save(model_file)
    draw_train_history(history, chart_file=chart_file, begin_epoch=2)

    # Reload the saved model and copy its layers back onto ar_model so the
    # custom predict_future() method runs with the restored weights.
    saved_model = load_model(model_file)
    ar_model.rnn_cell = saved_model.rnn_cell
    ar_model.dense_mu = saved_model.dense_mu
    ar_model.dense_sigma = saved_model.dense_sigma

    cnt = 25

    assert cnt // 5 * 5 == cnt  # must be divisible by 5
    np.random.shuffle(X_test)  # randomly pick cnt sequences from X_test to inspect the trend forecasts
    behalf_mean = np.squeeze(np.mean(X_test[:, :seq_length - pred_len, 0], axis=1))  # mean RMS over the condition range
    second_half_mean = np.squeeze(np.mean(X_test[:, -pred_len:, 0], axis=1))  # mean RMS over the prediction range

    # Mix flat, rising, and falling sequences so the check covers all regimes.
    # NOTE(review): the part1 and part3 masks can overlap (a sharply falling
    # sequence satisfies both), so a sequence may appear twice — confirm intended.
    part1 = X_test[second_half_mean <= 2 * behalf_mean, :, :][:int(cnt * 0.4)]
    part2 = X_test[second_half_mean > 2 * behalf_mean, :, :][:int(cnt * 0.4)]
    part3 = X_test[behalf_mean > 2 * second_half_mean, :, :][:int(cnt * 0.2)]
    X_valid = np.vstack([part1, part2, part3])
    print("X_valid shape", X_valid.shape)
    x = X_valid[0:cnt, :-pred_len, :]
    quantile = [0.2, 0.5, 0.8]  # quantiles to plot
    print("predict input shape", x.shape)
    # prediction = np.squeeze(predict_future(ar_model, x, pred_len, quantile, sample=100))
    prediction = predict_future(ar_model, x, pred_len, quantile, sample=100)
    print("predict output shape", prediction.shape)

    # prediction shape = (num_quantiles, pred_len, num_samples, per-step feature dim)
    lower = prediction[0, :, :, 0]
    media = prediction[1, :, :, 0]
    upper = prediction[2, :, :, 0]
    _, axes = plt.subplots(cnt, figsize=(20, 70))  # figsize: Width, height in inches
    for i in range(cnt):
        axes[i].plot(range(seq_length), X_valid[i, :, 0], color='black', label='true')
        axes[i].plot(range(seq_length - pred_len, seq_length), media[:, i], color='red', label='media')
        axes[i].axvline(seq_length - pred_len, color='red', linestyle='dashed')
        # shade the 0.2–0.8 quantile band over the prediction range
        axes[i].fill_between(range(seq_length - pred_len, seq_length), lower[:, i], upper[:, i], color='blue',
                             alpha=0.25)
    plt.legend(loc="best")
    if check_file:
        plt.savefig(check_file, format="png")
    plt.show()
