import numpy as np
import matplotlib
from keras.models import load_model

matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from pandas import read_csv
import math
import pandas as pd
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from matplotlib import pyplot
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import explained_variance_score
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import mean_squared_error
import keras
import openpyxl
import json

# Global hyper-parameters and paths.
seed = 7  # random seed for reproducibility
batch_size = 32
epochs = 200  # number of training iterations (epochs)
filename = 'G:/dv项目/001/data.csv'
look_back = 5  # use five past values to predict the next one
train_size = 320  # NOTE(review): shadowed by a local in train_model — confirm still needed


def read_data(data_list, key_name):
    """Gather every numeric value stored under *key_name* into a
    column-vector-style list: [[v1], [v2], ...].

    Records whose value is missing or non-numeric are skipped.
    """
    collected = []
    for record in data_list:
        candidate = record.get(key_name)
        if not isinstance(candidate, (int, float)):
            continue
        collected.append([candidate])
    print('len', len(collected))
    return collected


def create_dataset(dataset, look_back=5):
    """Turn a (n, 1) series into supervised learning pairs.

    Each sample X is `look_back` consecutive values and y is the value
    immediately following them.

    Parameters
    ----------
    dataset : np.ndarray of shape (n, 1)
        The (scaled) series, one column.
    look_back : int, optional
        Window length (default 5, matching the module-level constant;
        previously read from the global, now a parameter).

    Returns
    -------
    (X, y) : np.ndarray shapes (n - look_back, look_back) and (n - look_back,)
    """
    dataX, dataY = [], []
    # range(len - look_back), not "len - look_back - 1": the original
    # off-by-one silently dropped the last usable (X, y) pair.
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i: i + look_back, 0])
        dataY.append(dataset[i + look_back, 0])
    return np.array(dataX), np.array(dataY)


def build_model(train):
    """Build and compile a single-layer LSTM regressor.

    The network maps an input of shape (1, train.shape[1]) to a single
    scalar and is optimised with Adam (lr=0.01) on mean squared error.
    """
    network = Sequential()
    network.add(LSTM(units=64, input_shape=(1, train.shape[1])))
    network.add(Dense(units=1))  # one-dimensional output
    adam = keras.optimizers.Adam(lr=0.01, beta_1=0.9, beta_2=0.999,
                                 epsilon=None, decay=0.0, amsgrad=False)
    network.compile(loss='mean_squared_error', optimizer=adam)
    return network


def mape(y_validation, predict_validation):
    """Mean absolute percentage error between the two arrays, in percent."""
    relative_errors = np.abs((predict_validation - y_validation) / y_validation)
    return relative_errors.mean() * 100


# 进行模型预测 预测生成60个预测数据
def prediction(model, historical_data):
    """Iteratively forecast the next 60 values of the series.

    Each step feeds `historical_data` to the model, appends the predicted
    scalar, and slides the window forward by one position.

    Parameters
    ----------
    model : object exposing ``predict(x) -> [[value]]`` (the trained LSTM)
    historical_data : np.ndarray
        The most recent observations used as the seed window.

    Returns
    -------
    list of the 60 predicted scalars. (The original computed this list but
    only printed it — the missing ``return`` is the fix here.)
    """
    prediction_steps = 60
    predictions = []
    for _ in range(prediction_steps):
        predicted_value = model.predict(historical_data)[0][0]
        predictions.append(predicted_value)
        # Slide the window: append the new prediction, drop the oldest value.
        # NOTE(review): np.append flattens to 1-D, so after the first step the
        # window loses its original shape — confirm the model accepts that.
        historical_data = np.append(historical_data, predicted_value)
        historical_data = historical_data[1:]

    print(predictions)
    return predictions


def main(DM_name):
    """Load the Excel dataset, extract column *DM_name*, train/evaluate its
    model, and return plot-ready arrays.

    Parameters
    ----------
    DM_name : str
        Name of the 'DM*' column to model.

    Returns
    -------
    (dataset, predict_train_plot, predict_validation_plot, predict_60_plot)
        As produced by `train_model`.
    """
    # Seed numpy so shuffling / initialisation is reproducible.
    np.random.seed(seed)

    # Sheet name selects the "全部竖向" (vertical) measurement set; a
    # horizontal sheet also exists in the workbook.
    df = pd.read_excel('static/data/new_fulldata.xlsx', sheet_name="全部竖向")
    json_data = df.to_json(orient='records')

    # Round-trip through JSON to get a plain list of dict records.
    data_list = json.loads(json_data)
    print(data_list)
    # Columns of interest all start with 'DM' (printed for inspection only).
    key_name = [item for item in list(data_list[0].keys()) if item.startswith('DM')]
    print(key_name)
    dataset = read_data(data_list, DM_name)
    dataset, predict_train_plot, predict_validation_plot, predict_60_plot = train_model(dataset, DM_name)
    return dataset, predict_train_plot, predict_validation_plot, predict_60_plot


def train_model(dataset, name):
    """Fine-tune a pre-trained per-column LSTM model and build plot arrays.

    Loads ``static/Horizontal_model/<name>_model.h5``, continues training it
    on the first half of *dataset*, evaluates on the second half, rolls the
    model forward 60 steps, and returns four arrays padded with NaN so they
    share one x-axis when plotted.

    Parameters
    ----------
    dataset : list[[float]] or ndarray of shape (n, 1)
        The raw series for one 'DM*' column (as produced by `read_data`).
    name : str
        Column name; selects which saved model file to load.

    Returns
    -------
    (data, predict_train_plot, predict_validation_plot, predict_60_plot)
        Each of shape (n + 60, 1); positions outside each segment are NaN.
    """
    # NOTE(review): this local shadows the module-level `train_size = 320`.
    train_size = len(dataset) // 2

    # Scale to [0, 1]; the same fitted scaler inverts the predictions later.
    scaler = MinMaxScaler()
    dataset = scaler.fit_transform(dataset)

    # First half trains, second half validates.
    train, validation = dataset[: train_size], dataset[train_size:]

    # (Windowed supervised-learning path, kept for reference:)
    # X_train, y_train = create_dataset(train)
    # X_validation, y_validation = create_dataset(validation)

    # Reshape input to [samples, time steps, features]:
    # X_train = np.reshape(X_train, (X_train.shape[0], 1, X_train.shape[1]))
    # X_validation = np.reshape(X_validation, (X_validation.shape[0], 1, X_validation.shape[1]))
    # print('X_train:', X_train)

    # Load the previously trained model instead of building a fresh one.
    # model = build_model(train)
    model_name = 'static/Horizontal_model/' + name + '_model.h5'
    model = load_model(model_name)
    model.summary()
    # NOTE(review): the series is used as both input and target here
    # (train -> train), i.e. the model is fitted to reproduce each scaled
    # value from itself — confirm this is the intended training scheme.
    model.fit(train, train, epochs=epochs, batch_size=batch_size, verbose=1)

    # In-sample and out-of-sample predictions (still in scaled space).
    predict_train = model.predict(train)
    predict_validation = model.predict(validation)

    # Multi-step forecast: roll the model forward 60 steps, feeding each
    # prediction back in as the newest observation.
    prediction_steps = 60
    # NOTE(review): train.shape[1] is the column count (presumably 1), so the
    # seed window is just the final observation — the original author also
    # questioned this ("why take only one value?"). Verify against the model
    # input shape.
    historical_data = dataset[-train.shape[1]:]
    predictions = []

    for i in range(prediction_steps):
        predicted_value = model.predict(historical_data.reshape(1, train.shape[1], 1))[0][0]
        predictions.append([predicted_value])
        # np.append flattens; dropping the first element keeps the window
        # length constant.
        historical_data = np.append(historical_data, predicted_value)
        historical_data = historical_data[1:]

    # Undo the MinMax scaling so the error metrics are in original units.
    predict_train = scaler.inverse_transform(predict_train)
    y_train = scaler.inverse_transform(train)
    predict_validation = scaler.inverse_transform(predict_validation)
    y_validation = scaler.inverse_transform(validation)
    predictions = scaler.inverse_transform(predictions)
    print(predictions)

    # Evaluate the model: RMSE / MSE / MAE / MAPE on train and validation.
    train_rmse = math.sqrt(mean_squared_error(y_train, predict_train[:, 0]))
    print('Train Score: %.3f RMSE' % train_rmse)
    validation_rmse = math.sqrt(mean_squared_error(y_validation, predict_validation[:, 0]))
    print('Validation Score: %.3f RMSE' % validation_rmse)
    train_mse = mean_squared_error(y_train, predict_train[:, 0])
    print('Train Score: %.3f MSE' % train_mse)
    validation_mse = mean_squared_error(y_validation, predict_validation[:, 0])
    print('validation Score: %.3f MSE' % validation_mse)
    train_mae = mean_absolute_error(y_train, predict_train[:, 0])
    print('Train Score: %.3f MAE' % train_mae)
    validation_mae = mean_absolute_error(y_validation, predict_validation[:, 0])
    print('Validation Score: %.3f MAE' % validation_mae)
    train_mape = mape(y_train, predict_train[:, 0])
    print('Train Score: %.3f MAPE' % train_mape)
    validation_mape = mape(y_validation, predict_validation[:, 0])
    print('validation Score: %.3f MAPE' % validation_mape)
    extended_length = 60
    # Plot array for the training-set predictions: NaN everywhere except the
    # leading len(predict_train) positions.
    predict_train_plot = np.empty((len(dataset) + extended_length, dataset.shape[1]))
    predict_train_plot[:, :] = np.nan
    predict_train_plot[:len(predict_train), :] = predict_train

    # Plot array for the validation-set predictions (middle segment).
    predict_validation_plot = np.empty((len(dataset) + extended_length, dataset.shape[1]))
    predict_validation_plot[:, :] = np.nan
    predict_validation_plot[len(predict_train):len(dataset), :] = predict_validation
    #
    # Plot array for the extra 60 forecast steps (trailing segment).
    # extended_length = 60
    predict_60_plot = np.empty((len(dataset) + extended_length, dataset.shape[1]))
    predict_60_plot[:, :] = np.nan
    predict_60_plot[len(dataset):len(predictions) + len(dataset), :] = predictions

    # Original series, un-scaled and padded out to the extended length.
    dataset = scaler.inverse_transform(dataset)
    data = np.empty((len(dataset) + extended_length, dataset.shape[1]))
    data[:, :] = np.nan
    data[:len(dataset), :] = dataset

    # (Matplotlib preview, kept for reference:)
    # plt.plot(dataset, color='blue')
    # plt.plot(predict_train_plot, color='green')
    # plt.plot(predict_validation_plot, color='red')
    # plt.plot(predict_60_plot, color='yellow')
    # plt.show()

    # Model saving (Vertical_model / Horizontal_model folders), disabled:
    # model.save('./Horizontal_model/' + name + '_model.h5')
    return data, predict_train_plot, predict_validation_plot, predict_60_plot


# main('DM2-3')
# read_data()
