import pandas as pd
import numpy as np
from math import sqrt
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from sklearn.model_selection import GridSearchCV
import joblib
import math
from pyod.models.mo_gaal import MO_GAAL
from pyod.models.knn import KNN
from xgboost import XGBRegressor
from sklearn.metrics import mean_squared_error
model_seed = 100
import statsmodels.api as sm
from statsmodels.tsa.arima_model import ARMA

def extract_feature_merge_data(data):
    """
    Extract week/day/hour/minute features from the time index and merge
    them into the original dataset (used only by the XGBoost pipeline).

    :param data: DataFrame indexed by the timestamp column u'时间标签'
    :return: the DataFrame, re-indexed on u'时间标签', with added
             'week', 'day', 'hour' and 'minute' feature columns
    """
    data = data.reset_index(drop=False)
    timestamps = data[u'时间标签']
    # BUG FIX: Series.dt.week was deprecated in pandas 1.1 and removed in
    # 2.0; dt.isocalendar().week is the supported equivalent (same values,
    # cast back to int64 to match the old dtype).
    data['week'] = timestamps.dt.isocalendar().week.astype('int64')
    data['day'] = timestamps.dt.day
    data['hour'] = timestamps.dt.hour
    data['minute'] = timestamps.dt.minute
    data = data.set_index(u'时间标签')

    return data

def data_position_convert(data, position):
    """
    Rotate the columns from `position` onward to the front of the frame so
    every dataset shares the same column layout (used only by XGBoost).

    :param data: original dataset
    :param position: index of the first column to move to the front
    :return: DataFrame with the unified column order
    """
    original_order = data.columns.values.tolist()
    # Each move drops the column and re-inserts it at position 0, so the
    # trailing columns end up at the front in reverse order.
    for name in original_order[position:]:
        moved = data[name]
        data = data.drop(name, axis=1)
        data.insert(0, name, moved)

    return data

def get_mape(y_true, y_pred):
    """
    Mean Absolute Percentage Error (MAPE).

    :param y_true: ground-truth values of the charge_data samples
    :param y_pred: model predictions for the charge_data samples
    :return: MAPE as a percentage
    """
    actual, predicted = np.array(y_true), np.array(y_pred)
    relative_errors = np.abs((actual - predicted) / actual)
    return np.mean(relative_errors) * 100

def XGBoost_train(x_train, x_test, y_train, y_test, index):
    """
    Grid-search an XGBoost regressor and persist the best estimator.

    :param x_train: training features
    :param x_test: test features (unused here; kept for a uniform signature)
    :param y_train: training labels
    :param y_test: test labels (unused here; kept for a uniform signature)
    :param index: dataset name used in the saved-model filename
    :return: the fitted GridSearchCV object
    """
    parameters = {'n_estimators': range(10, 60, 10),
                  'max_depth': range(2, 8, 1),
                  'learning_rate': [0.001, 0.005, 0.01, 0.05, 0.1, 0.2],
                  'min_child_weight': range(4, 12, 1)
                  #'subsample':[0.1,0.3,0.5, 0.7, 0.9],
                  #'gamma':[0.1, 0.3, 0.5, 0.7, 0.9],
                  #'colsample_bytree':[0.5, 0.7, 0.9],
                  #'colsample_bylevel':[0.5, 0.7, 0.9]
                  }
    model = XGBRegressor(seed=model_seed,
                         n_estimators=100,
                         max_depth=3,
                         eval_metric='rmse',
                         learning_rate=0.1,
                         min_child_weight=1,
                         subsample=1,
                         colsample_bytree=1,
                         colsample_bylevel=1,
                         gamma=0)
    gs = GridSearchCV(estimator=model, param_grid=parameters, cv=5, refit=True,
                      scoring='neg_mean_squared_error')
    gs.fit(x_train, y_train)
    # BUG FIX: raw string — '\s' in a plain literal is an invalid escape
    # sequence (SyntaxWarning since Python 3.12), and plot_results in this
    # file already uses the raw-string convention.
    joblib.dump(gs.best_estimator_, r'.\save_model\{value}的XGBoost.pkl'.format(value=index))

    return gs


def model_predict_result(gs, x_train, x_test, y_train, y_test):
    """
    Offline evaluation of a fitted grid-search model: print the best
    hyper-parameters, RMSE and MAPE on the dev set.

    :param gs: fitted GridSearchCV model
    :param x_train: training features
    :param x_test: dev-set features
    :param y_train: training labels (unused)
    :param y_test: dev-set labels
    :return: (n, 2) array of (prediction, truth) pairs
    """
    print('最优参数: ', gs.best_params_)
    est = gs.predict(x_train)  # kept for parity with the original flow (result unused)
    pre = gs.predict(x_test)
    rmse = math.sqrt(mean_squared_error(y_test, pre))
    print("RMSE on dev set = %0.3f" % rmse)
    mape = get_mape(y_test, pre)
    print("MAPE on dev set = %0.3f%%" % mape)
    paired = np.hstack([pre.reshape(-1, 1), y_test.reshape(-1, 1)])

    return paired

def model_predict(gs, merge_data):
    """
    Online prediction: split the merged matrix into features/labels and
    pair the model's predictions with the true values.

    :param gs: fitted model exposing .predict
    :param merge_data: matrix whose last column is the label
    :return: (n, 2) array of (prediction, truth) pairs
    """
    matrix = np.array(merge_data)
    features = matrix[:, 0:-1]
    labels = matrix[:, -1]
    predictions = gs.predict(features)
    paired = np.hstack([predictions.reshape(-1, 1), labels.reshape(-1, 1)])

    return paired

def find_index(data):
    """
    Split the positions of a binary label sequence into inlier and
    outlier position lists.

    pyod's `labels_` convention (stated at the call sites in this file) is
    0 = inlier, 1 = outlier.

    :param data: sequence of binary prediction labels
    :return: (inliers_index, outliers_index) position lists
    """
    inliers_index = list()
    outliers_index = list()
    labels = np.array(data)
    # NOTE: the original guarded `labels is not None`, but np.array(...)
    # never returns None, so that branch was dead and has been removed.
    for i, value in enumerate(labels):
        # BUG FIX: the original appended label==1 positions to the INLIER
        # list, inverting pyod's 0=inlier / 1=outlier convention.
        if value == 1:
            outliers_index.append(i)
        else:
            inliers_index.append(i)

    return inliers_index, outliers_index

def calculate_variance(dps, moving_average):
    """
    Mean squared deviation of the samples from their moving average,
    skipping positions where the moving average is NaN.

    :param dps: input samples (pandas Series)
    :param moving_average: moving-average series aligned with dps
    :return: average squared deviation over the non-NaN positions
    """
    missing = moving_average.isnull()
    total = 0
    skipped = 0
    for pos in range(len(dps)):
        if missing[pos]:
            skipped += 1
        else:
            total += (dps[pos] - moving_average[pos]) ** 2

    return total / (len(dps) - skipped)

def extract_outliers(Error, threshold):
    """
    Locate anomalous points in an error sequence.

    A point is anomalous when it falls outside the band
    EWMA ± threshold * (mean squared deviation from the EWMA).

    :param Error: error dataset
    :param threshold: band-width multiplier
    :return: list of anomalous point positions
    """
    dps = pd.Series(Error)
    ewma_line = dps.ewm(span=4).mean()
    # Half-width of the acceptance band; constant over the series.
    band = threshold * calculate_variance(dps, ewma_line)
    anomalies = [
        pos for pos in ewma_line.index
        if not (ewma_line[pos] - band <= dps[pos] <= ewma_line[pos] + band)
    ]

    return anomalies



def LSTM_train(train_x,test_x, train_y, test_y, index='异常检测'):
    """
    Train a single-layer LSTM and plot its train/validation loss curves.

    :param train_x: training features, shape (samples, features)
    :param test_x: test features, shape (samples, features)
    :param train_y: training labels
    :param test_y: test labels
    :return: trained Keras model (also saved under .\\save_model)
    """
    train_X = train_x.reshape((train_x.shape[0], 1, train_x.shape[1]))
    test_X = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))
    # Build and train the model.
    model = Sequential()
    model.add(LSTM(500, input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    # BUG FIX: the original called fit() twice (50 + 50 epochs) and saved
    # the model between the two calls, so the file on disk did not match
    # the returned model. Train once, then save the final weights.
    history = model.fit(train_X, train_y, epochs=50, batch_size=50, validation_data=(test_X, test_y), verbose=2, shuffle=False)
    # Raw string: '\s' in a plain literal is an invalid escape sequence
    # (SyntaxWarning since Python 3.12).
    model.save(r'.\save_model\{value}的LSTM.h5'.format(value=index), overwrite=True, include_optimizer=True)
    plt.figure()
    plt.plot(history.history['loss'], label='train')
    plt.plot(history.history['val_loss'], label='test')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.title("误差图")
    plt.savefig(r'.\Img\{value}数据的LSTM训练误差图.png'.format(value=index))
    return model

def model_predict_LSTM(model, test_x, test_y):
    """
    Offline-stage prediction with a trained LSTM; prints the test RMSE.

    :param model: trained Keras model
    :param test_x: test features, shape (samples, features)
    :param test_y: test labels
    :return: (n, 2) array of (prediction, truth) pairs
    """
    reshaped = test_x.reshape((test_x.shape[0], 1, test_x.shape[1]))
    yHat = model.predict(reshaped)
    rmse = sqrt(mean_squared_error(yHat, test_y))
    print('Test RMSE: %.3f' % rmse)
    lstm_error = np.hstack([yHat.reshape(-1, 1), test_y.reshape(-1, 1)])

    return lstm_error



def model_predict_lstm(model, merge_data):
    """
    Online-stage LSTM prediction: split the merged matrix into
    features/labels, predict, and pair predictions with the truth.

    :param model: trained Keras model
    :param merge_data: matrix whose last column is the label
    :return: (n, 2) array of (prediction, truth) pairs
    """
    matrix = np.array(merge_data)
    features = matrix[:, 0:-1]
    labels = matrix[:, -1]
    reshaped = features.reshape((features.shape[0], 1, features.shape[1]))
    yHat = model.predict(reshaped)
    rmse = sqrt(mean_squared_error(yHat, labels))
    print('Test RMSE: %.3f' % rmse)
    lstm_error = np.hstack([yHat.reshape(-1, 1), labels.reshape(-1, 1)])

    return lstm_error



def MO_GALL_Model(data, contamination):
    """
    GAN-based (MO_GAAL) anomaly detection.

    :param data: input dataset
    :param contamination: expected outlier fraction (a small value)
    :return: (fitted detector, inlier positions, outlier positions)
    """
    samples = np.array(data)
    # Train the MO_GAAL detector.
    clf = MO_GAAL(k=5, stop_epochs=10, contamination=contamination)
    clf.fit(samples)
    y_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)
    y_score = clf.decision_scores_  # raw outlier scores (unused)
    #joblib.dump(clf, './save_model/SO_GALL.joblib')
    inliers, outliers = find_index(y_pred)

    return clf, inliers, outliers


def KNN_Model(data, contamination):
    """
    KNN-based anomaly detection (pyod); persists the fitted detector.

    :param data: input dataset
    :param contamination: expected outlier fraction (a small value)
    :return: (fitted detector, inlier positions, outlier positions)
    """
    samples = np.array(data)
    # Train the KNN detector.
    clf = KNN(contamination=contamination)
    clf.fit(samples)
    y_pred = clf.labels_  # binary labels (0: inliers, 1: outliers)
    y_score = clf.decision_scores_  # raw outlier scores (unused)
    joblib.dump(clf, './save_model/KNN.pkl')
    inliers, outliers = find_index(y_pred)

    return clf, inliers, outliers

def plot_results(predicted_data, true_data):
    """
    Plot predictions against the ground truth and save the figure.

    :param predicted_data: predicted series
    :param true_data: original series
    :return: None (figure written under .\\Img)
    """
    plt.figure(figsize=(10, 5), facecolor='white')
    plt.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.savefig(r".\Img\预测值与真实值的可视化图.png")


def arima_model(data, index):
    """
    Fit an ARMA model after an ADF stationarity check on the residuals,
    and persist the fitted results.

    :param data: original time-series dataset
    :param index: name used in the saved-model filename
    :return: fitted ARMA results object
    """
    time_series = np.array(data)
    # Residual / white-noise check.
    # NOTE(review): statsmodels' ARMA expects a 2-element (p, q) order; the
    # 3-element tuple here is kept from the original — confirm whether an
    # ARIMA(0, 1, 1) specification was actually intended.
    arma_mod = ARMA(time_series, (0, 1, 1)).fit(disp=-1, method='mle')
    resid = arma_mod.resid
    t = sm.tsa.stattools.adfuller(resid)
    adf_labels = ['Test Statistic Value', "p-value", "Lags Used",
                  "Number of Observations Used", "Critical Value(1%)",
                  "Critical Value(5%)", "Critical Value(10%)"]
    adf_values = [t[0], t[1], t[2], t[3],
                  t[4]['1%'], t[4]['5%'], t[4]['10%']]
    # Kept for parity with the original (printing is commented out there).
    output = pd.DataFrame({'value': adf_values}, index=adf_labels)
    #print(output)
    clf = sm.tsa.ARMA(time_series, order=(0, 1))
    # BUG FIX: the original discarded the result of fit() and dumped /
    # returned the UNFITTED ARMA wrapper; keep the fitted results instead.
    fitted = clf.fit(disp=-1, maxiter=100)
    joblib.dump(fitted, './save_model/{name}_ARIMA.pkl'.format(name=index))

    return fitted

def LSTM_train_day(X, y, in_steps, out_steps,index='predict_'):
    """
    Train a stacked LSTM that forecasts `out_steps` values at once.

    :param X: input sequences, shape (samples, in_steps)
    :param y: targets, shape (samples, out_steps)
    :param in_steps: number of input time steps
    :param out_steps: number of output time steps (forecast horizon)
    :param index: prefix used in the saved-model filename
    :return: trained Keras model (also saved under .\\save_model)
    """
    n_features = 1
    X = X.reshape((X.shape[0], X.shape[1], n_features))
    # define model
    model = Sequential()
    model.add(LSTM(200, activation='relu', return_sequences=True, input_shape=(in_steps, n_features)))
    model.add(LSTM(200, activation='relu'))
    # Unlike single-step forecasting, the output layer has out_steps units.
    model.add(Dense(out_steps))
    model.compile(optimizer='adam', loss='mse')
    model.fit(X, y, epochs=50, verbose=0)
    # BUG FIX: raw string — '\s' in a plain literal is an invalid escape
    # sequence (SyntaxWarning since Python 3.12).
    model.save(r'.\save_model\{value1}_{value2}_LSTM.h5'.format(value1=index, value2=out_steps))

    return model
