import seaborn as sns
from sklearn.model_selection import train_test_split
from model import *
from keras.models import load_model


def get_coal_and_charge(dict_data, index1, index2):
    """
    Separate coal-consumption frames from load frames, returning each group as a list.

    :param dict_data: dict of DataFrames whose first column carries the data label
    :param index1: label value that marks coal-consumption data
    :param index2: label value that marks load data
    :return: (list of coal-consumption frames, list of load frames)
    """
    coal_data = []
    charge_data = []
    # Original iterated .items() and then re-indexed dict_data[key]; iterating
    # the values directly avoids the redundant lookup.
    for frame in dict_data.values():
        labels = list(set(frame.iloc[:, 0].values.tolist()))
        if not labels:
            # Empty frame: no label to classify by (original raised IndexError here).
            continue
        if labels[0] == index1:
            coal_data.append(frame)
        elif labels[0] == index2:
            charge_data.append(frame)

    return coal_data, charge_data

def get_sum_data(list_all, index):
    """
    Aggregate every feeder (or load) frame into one total per 5-second sample.

    :param list_all: list of DataFrames for coal consumption or load
    :param index: label prefix for the resulting sum column
    :return: DataFrame holding only the '<index>_sum' total column
    """
    first = list_all[0]
    merged = first.drop(first.columns[0], axis=1, inplace=False)
    # Join each remaining frame (minus its label column) on the time tag.
    for frame in list_all[1:]:
        trimmed = frame.drop(frame.columns[0], axis=1, inplace=False)
        merged = pd.merge(merged, trimmed, left_on='时间标签', right_index=True, how='outer')
    # Row-wise total across all merged columns.
    merged[index + '_sum'] = merged.apply(lambda row: row.sum(), axis=1)
    # Keep only the last (sum) column, dropping the per-source columns.
    return merged.drop(merged.columns[0:-1], axis=1, inplace=False)

def transformer_data(coal_data):
    """
    Convert a coal-consumption rate (t/h) into actual tonnage per sample interval.

    NOTE(review): dividing by 360 equals t/h -> t/s (divide by 3600) scaled by a
    10-second interval, while the original docstring mentioned 5 s — confirm the
    actual sampling interval.

    :param coal_data: coal-consumption rate data (t/h)
    :return: actual coal consumption (t)
    """
    seconds_per_hour = 3600
    interval_scale = 10
    return coal_data / (seconds_per_hour / interval_scale)

def merge_coal_and_charge(electronic, generator):
    """
    Merge load data and coal-consumption data on the time-tag column.

    :param electronic: load DataFrame containing a '时间标签' column
    :param generator: coal-consumption DataFrame, joined on its index
    :return: outer-joined DataFrame with gaps forward-filled
    """
    merged = pd.merge(electronic, generator, left_on='时间标签', right_index=True, how='outer')
    # Forward-fill gaps from the outer join. fillna(method='ffill') is
    # deprecated in modern pandas; .ffill() is the equivalent replacement.
    return merged.ffill()


def plot_coal_and_charge(data):
    """
    Plot the coal-consumption vs. load relationship and both series over time.

    :param data: DataFrame with 'coal_sum' and 'charge_sum' columns
    :return: None; saves two PNG files under .\\Img\\
    """
    plt.figure()
    # Enable Chinese-font rendering and proper minus signs in labels.
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    # Scatter plot: coal consumption vs. load.
    ax = sns.scatterplot(x="coal_sum", y="charge_sum", data=data)
    ax.set_title(u'煤耗量与负荷量的关系')
    plt.xlabel(u'煤耗量(t)')
    plt.ylabel(u'负荷量(MW)')
    plt.savefig(r".\Img\煤耗量与负荷量的关系.png")
    # Second figure: both series against time.
    plt.figure()
    plt.plot(data.index, data['coal_sum'], '.', label="煤耗量")
    plt.plot(data.index, data['charge_sum'], '.', label="负荷量")
    plt.legend(loc="best")
    # BUG fix: the original called ax.set_title here, which titled the FIRST
    # figure's axes; plt.title targets the current (second) figure.
    plt.title(u'负荷量和煤耗量与时间的关系')
    plt.xlabel(u'时间')
    plt.ylabel(u'负荷量(MW)/煤耗量(t)')
    plt.savefig(r".\Img\负荷量与时间的关系.png")



def series_to_supervised(data, columns, n_in=1, n_out=1, dropnan=True):
    """
    Frame a time series as a supervised-learning dataset for LSTM training.

    :param data: sequence or 2-D array of observations
    :param columns: variable names used to build the output column labels
    :param n_in: number of lag observations (t-n_in ... t-1) as input
    :param n_out: number of observations (t ... t+n_out-1) as output
    :param dropnan: drop rows containing NaN introduced by shifting
    :return: DataFrame of the supervised series
    """
    n_vars = 1 if type(data) is list else data.shape[1]
    df = pd.DataFrame(data)
    cols, names = list(), list()
    # Input sequence (t-n_in, ..., t-1).
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('%s%d(t-%d)' % (columns[j], j + 1, i)) for j in range(n_vars)]
    # Forecast sequence (t, t+1, ..., t+n_out-1).
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('%s%d(t)' % (columns[j], j + 1)) for j in range(n_vars)]
        else:
            names += [('%s%d(t+%d)' % (columns[j], j + 1, i)) for j in range(n_vars)]
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    if dropnan:
        # BUG fix: the original returned an undefined name when dropnan=False
        # (NameError); now the un-dropped frame is returned in that case.
        agg = agg.dropna()

    return agg

def invert_LSTM_style(df):
    """
    Reshape a time-series frame into the array format LSTM training expects.

    :param df: time-series DataFrame
    :return: ndarray of lagged inputs plus the final time-t target column
    """
    n_cols = len(df.columns)
    # Positions of every time-t column except the last one (the target).
    surplus_positions = list(range(n_cols, 2 * n_cols - 1))
    supervised = series_to_supervised(df, df.columns, 1, 1)
    supervised.drop(supervised.columns[surplus_positions], axis=1, inplace=True)
    return supervised.values

def split_train_test_data(dataset):
    """
    Split the dataset into train/test partitions (70/30, in order, no shuffle).

    :param dataset: array-like whose last column is the target
    :return: (x_train, x_test, y_train, y_test)
    """
    arr = np.array(dataset)
    features = arr[:, 0:-1]
    target = arr[:, -1]
    # shuffle=False keeps the time ordering intact for the sequential models.
    return train_test_split(features, target, test_size=0.3, random_state=0, shuffle=False)


def model_train(merge_data, algorithm,verbose):
    """
    Train an anomaly-detection model and return indices of outlier samples.

    :param merge_data: merged coal/load dataset (raw input)
    :param algorithm: "xgboost" or "LSTM"; any other value falls through and
                      returns None implicitly
    :param verbose: forwarded to the underlying trainer
                    (NOTE(review): the caller in model() passes '煤耗量' here —
                    confirm this is intended)
    :return: indices of samples whose prediction residual exceeds the
             hard-coded threshold (700 for xgboost, 500 for LSTM)
    """
    if algorithm == "xgboost":
        xgboost_merge_data = extract_feature_merge_data(merge_data)
        xgboost_data = data_position_convert(xgboost_merge_data,2)
        x_train,x_test,y_train,y_test=split_train_test_data(xgboost_data)
        gs=XGBoost_train(x_train, x_test, y_train, y_test,verbose)
        xgboost_error=model_predict_result(gs, x_train, x_test, y_train, y_test)
        # Residual between the two prediction-result columns (presumably
        # actual vs predicted — confirm against model_predict_result).
        error1 = xgboost_error[:, 0] - xgboost_error[:, 1]
        index1 = extract_outliers(error1,700)

        return index1

    elif algorithm =="LSTM":
        LSTM_merge_data = extract_feature_merge_data(merge_data)
        LSTM_data = data_position_convert(LSTM_merge_data,2)
        # Reframe the series into the supervised shape expected by the LSTM.
        LSTM_merge_data=invert_LSTM_style(LSTM_data)
        x_train, x_test, y_train, y_test = split_train_test_data(LSTM_merge_data)
        model=LSTM_train(x_train, x_test, y_train, y_test,verbose)
        LSTM_error=model_predict_LSTM(model,x_test,y_test)
        # Same residual logic as above, with a tighter hard-coded threshold.
        error2 = LSTM_error[:, 0] - LSTM_error[:, 1]
        index2 = extract_outliers(error2,500)

        return index2


def plot_unsupervise_model(clf, X_train, title):
    """
    Visualize an unsupervised detector's inliers/outliers and save the figure.

    :param clf: fitted detector exposing predict(); label 1 marks an inlier
    :param X_train: training DataFrame (its first two columns are plotted)
    :param title: algorithm name, used in the figure title and the file name
    :return: positional indices of the inlier samples
    """
    y_pred = clf.predict(X_train)
    inliers_index, outliers_index = find_index(y_pred)
    inliers = np.array(X_train.iloc[inliers_index])
    outliers = np.array(X_train.iloc[outliers_index])
    plt.figure()
    plt.title(title)
    true_inliers = plt.scatter(inliers[:, 0], inliers[:, 1], c='red', s=20, edgecolor='k')
    true_outliers = plt.scatter(outliers[:, 0], outliers[:, 1], c='green', s=20, edgecolor='k')
    # BUG fix: the original legend mapped the inlier handle to '异常点'
    # (outlier) and the outlier handle to '正常点' (normal) — labels were
    # swapped relative to the plotted data.
    plt.legend([true_inliers, true_outliers], [u'正常点', u'异常点'], loc="upper left")
    # NOTE(review): this second title overwrites the algorithm-name title set
    # above (original behavior, kept) — confirm which title is wanted.
    plt.title(u'煤耗量与负荷量的关系')
    plt.xlabel(u'煤耗量(t)')
    plt.ylabel(u'负荷量(MW)')
    plt.savefig(r".\Img\基于{value}的异常检测.png".format(value=title))

    return inliers_index


def model(merge_data,index,algorithm,threshold1=700,threshold2=500,contamination=0.02):
    """
    Model framework: dispatch to the chosen algorithm for training or testing.

    :param merge_data: merged input dataset
    :param index: "train" to fit a model, "test" to load a saved one and predict
    :param algorithm: "xgboost", "LSTM", "KNN" or "MO_GALL"
    :param threshold1: residual threshold for flagging xgboost outliers (test path)
    :param threshold2: residual threshold for flagging LSTM outliers (test path)
    :param contamination: expected outlier fraction for the unsupervised models
    :return: indices of detected outlier (or, for the unsupervised plots, inlier)
             samples as produced by the selected branch
    :raises ValueError: on an unknown algorithm or index value
    """
    if index=="train":
        if algorithm=="xgboost":
            # NOTE(review): '煤耗量' is passed as model_train's `verbose`
            # argument — confirm this is intended.
            xgboost_index = model_train(merge_data,"xgboost",'煤耗量')
            return xgboost_index
        elif algorithm=="LSTM":
            LSTM_index = model_train(merge_data, 'LSTM','煤耗量')
            return LSTM_index
        elif algorithm=="KNN":
            clf1,inliers_index, outliers_index= KNN_Model(merge_data,contamination)  # KNN-based detector
            KNN_index=plot_unsupervise_model(clf1, merge_data, "KNN")
            return KNN_index
        elif algorithm=="MO_GALL":
            clf2,inliers_index, outliers_index= MO_GALL_Model(merge_data, contamination)  # GAN-based detector
            GALL_index=plot_unsupervise_model(clf2, merge_data, "MO_GALL")
            return GALL_index
        else:
            raise ValueError("you should change algorithm value")
    elif index=="test":
        if algorithm == "xgboost":
            # Load the persisted XGBoost model from disk.
            with open('.\save_model\XGBoost.pkl', 'rb') as files:
                model = joblib.load(files)
            xgboost_merge_data = extract_feature_merge_data(merge_data)
            xgboost_data = data_position_convert(xgboost_merge_data,2)
            print(xgboost_data)
            xgboost_error=model_predict(model,xgboost_data)
            # Residual between the two prediction-result columns (presumably
            # actual vs predicted — confirm against model_predict).
            error3 = xgboost_error[:, 0] - xgboost_error[:, 1]
            xgboost_index = extract_outliers(error3, threshold1)
            return xgboost_index
        elif algorithm == "LSTM":
            model = load_model('.\save_model\LSTM.h5')
            lstm_merge_data = extract_feature_merge_data(merge_data)
            lstm_data = data_position_convert(lstm_merge_data,2)
            # Reframe into the supervised shape the LSTM was trained on.
            lstm_merge_data = invert_LSTM_style(lstm_data)
            LSTM_error=model_predict_lstm(model,lstm_merge_data)
            error4 = LSTM_error[:, 0] - LSTM_error[:, 1]
            LSTM_index = extract_outliers(error4, threshold2)
            return LSTM_index
        elif algorithm == "KNN":
            with open('.\save_model\KNN.pkl', 'rb') as files1:
                model1 = joblib.load(files1)
            KNN_index=plot_unsupervise_model(model1, merge_data, "KNN")
            return KNN_index
        elif algorithm == "MO_GALL":
            # NOTE(review): unlike the KNN branch, no saved model is loaded —
            # the GAN detector is re-fitted even on the "test" path; confirm.
            model2, inliers_index, outliers_index = MO_GALL_Model(merge_data, contamination)  # GAN-based detector
            GALL_index=plot_unsupervise_model(model2, merge_data, "MO_GALL")
            return GALL_index

        else:
            raise ValueError("you should change algorithm value")
    else:
        raise ValueError("you should change index value")


def find_index(data):
    """
    Partition prediction labels into inlier and outlier index lists.

    :param data: sequence of predicted labels where 1 marks an inlier
    :return: (inlier indices, outlier indices)
    """
    inliers_index = []
    outliers_index = []
    # The original guarded on `np.array(data) is not None`, which is always
    # true (np.array never returns None), making the raise unreachable; the
    # dead branch has been removed.
    for i, value in enumerate(np.asarray(data)):
        if value == 1:
            inliers_index.append(i)
        else:
            outliers_index.append(i)

    return inliers_index, outliers_index


def NormalizeMult(data):
    """
    Min-max normalize each column independently.

    :param data: 2-D array-like, one variable per column
    :return: (normalized data as a float64 ndarray, per-column [min, max]
             array usable for de-normalization)
    """
    # BUG fix: the original wrote scaled values back into np.array(data)
    # without a cast, so integer input was silently truncated to 0/1.
    values = np.array(data, dtype='float64')
    n_cols = values.shape[1]
    # `normalize` records [min, max] per column for later de-normalization.
    normalize = np.zeros((n_cols, 2), dtype='float64')
    for i in range(n_cols):
        column = values[:, i]
        listlow, listhigh = np.percentile(column, [0, 100])
        normalize[i, 0] = listlow
        normalize[i, 1] = listhigh
        delta = listhigh - listlow
        # A constant column (delta == 0) is left unchanged, as in the original.
        if delta != 0:
            values[:, i] = (column - listlow) / delta

    return values, normalize


def sliding_window(df, column, stride):
    """
    Replace `column` with rolling sums over the next `stride` rows.

    The tail rows that lack a full window are padded with the string 'NaN'
    so the frame keeps its original length (filtered later by
    data_eiliminate_nan).

    :param df: input DataFrame (modified in place and returned)
    :param column: name of the column to aggregate
    :param stride: warning horizon (window width) in rows
    :return: the DataFrame with the aggregated column
    """
    values = df[column].values
    window_sums = [sum(values[i:i + stride]) for i in range(len(values) - stride + 1)]
    missing = df.shape[0] - len(window_sums)
    if missing > 0:
        window_sums.extend(['NaN'] * missing)
    df[column] = window_sums

    return df

def data_eiliminate_nan(dataset):
    """
    Drop the literal 'NaN' padding values from the coal-consumption column.

    :param dataset: DataFrame containing a '耗煤量（吨）' column
    :return: Series of that column with any 'NaN' string entries removed
    """
    series = pd.Series(dataset['耗煤量（吨）'].values, index=dataset.index)
    has_padding = "NaN" in series.values
    if has_padding:
        padding_mask = series.isin(['NaN'])
        series = series[~padding_mask]

    return series


def create_dataset(dataset, look_back=1):
    """
    Build a single-variable, single-step supervised dataset.

    Each sample is `look_back` consecutive values; its label is the value
    immediately after the window.

    :param dataset: 1-D sequence of observations
    :param look_back: window width (default 1)
    :return: (X as float32 ndarray, y as float32 ndarray)
    """
    n_samples = len(dataset) - look_back
    samples = [dataset[i:i + look_back] for i in range(n_samples)]
    labels = [dataset[i + look_back] for i in range(n_samples)]
    X = np.array(samples).astype('float32')
    y = np.array(labels).astype('float32')
    return X, y


def split_data_format(data, in_steps, out_steps):
    """
    Build a single-variable, multi-step supervised dataset.

    :param data: 1-D sequence of observations
    :param in_steps: number of input steps per sample
    :param out_steps: number of output steps per label
    :return: (X as float32 ndarray, y as float32 ndarray)
    """
    samples, labels = [], []
    start = 0
    # Slide the window until a full input+output span no longer fits.
    while start + in_steps + out_steps <= len(data):
        split_at = start + in_steps
        samples.append(data[start:split_at])
        labels.append(data[split_at:split_at + out_steps])
        start += 1
    X = np.array(samples).astype('float32')
    y = np.array(labels).astype('float32')

    return X, y


def model_predict_day(available_data,index,algorithm, verbose=True, window=3):
    """
    Forecast coal consumption over the coming days.

    :param available_data: DataFrame with a '耗煤量（吨）' column
    :param index: 'train' to fit a new model, 'test' to load a saved one
    :param algorithm: only 'lstm' is implemented (the error message also
                      mentions arima, which has no branch here)
    :param verbose: True -> sliding-window single-step variant;
                    False -> multi-step (5-step) variant
    :param window: sliding-window width used when verbose is True
    :return: the model's predictions
    :raises ValueError: on an unknown algorithm or index value
    """
    if index=='train':
        if algorithm=='lstm':
            if verbose:
                available_data_nan= sliding_window(available_data, '耗煤量（吨）', window)
                available_data=data_eiliminate_nan(available_data_nan)
                X, y = create_dataset(available_data, look_back=1)     # single-variable single-step forecast
                clf = LSTM_train_day(X, y, in_steps=1, out_steps=1)
            else:
                available_data = pd.Series(available_data['耗煤量（吨）'].values, index=available_data.index)
                X, y = split_data_format(available_data, in_steps=1, out_steps=5)  # single-variable multi-step forecast
                clf = LSTM_train_day(X, y, in_steps=1, out_steps=5)
            # Reshape to (samples, timesteps=1, features) for the LSTM.
            test_X = X.reshape((X.shape[0], 1, X.shape[1]))
            y_predict = clf.predict(test_X, verbose=0)
            return y_predict
        else:
            raise ValueError("you should tap model arima or lstm")
    elif index=='test':
        if algorithm == 'lstm':
            # Pick the saved model matching the training variant above.
            if verbose:
                model=load_model('.\save_model\predict__1_LSTM.h5')
            else:
                model=load_model('.\save_model\predict__3_LSTM.h5')
            available_data = pd.Series(available_data['耗煤量（吨）'].values, index=available_data.index)
            predict_data = np.array(available_data).astype('float32')
            predict_data = predict_data.reshape(-1, 1)
            # Reshape to (samples, timesteps=1, features) for the LSTM.
            predict_dataset = predict_data.reshape((predict_data.shape[0], 1, predict_data.shape[1]))
            y_predict = model.predict(predict_dataset, verbose=0)
            return y_predict
        else:
            raise ValueError("you should tap model arima or lstm")
    else:
        raise ValueError('you have no tap model train or test')


def predict_convert_shape(data):
    """
    Collapse a 2-D prediction array into a single column.

    Multi-step outputs are summed across steps; single-step outputs pass
    through unchanged.

    :param data: 2-D prediction array
    :return: column vector of shape (n, 1)
    """
    if data.shape[1] == 1:
        return data.reshape(-1, 1)
    return np.sum(data, axis=1).reshape(-1, 1)


def predict_days_warning(df,data, model, window=5,threshold=0.8):
    """
    Trigger the stock-level warning based on predicted consumption.

    :param df: original dataset with a '库存（吨）' (stock) column
    :param data: model predictions
    :param model: 'train' or 'test' — selects how the stock series is aligned
    :param window: rows trimmed from the stock series on the 'train' path
    :param threshold: warning margin factor
    :return: None; prints one message per row
    """
    series = list()
    y_predict = predict_convert_shape(data)
    data_one = df['库存（吨）'].values
    if model=='train':
        # Training predictions cover `window` fewer rows; trim the tail to align.
        series=np.array(data_one)[:len(data_one)-window]
    elif model=='test':
        series=np.array(data_one)
    # Pair each stock value (col 0) with its prediction (col 1).
    series_np=series.reshape(-1,1)
    y_predict= y_predict.reshape(-1,1)
    data_all=np.hstack((series_np,y_predict))
    for i in range(len(data_all)):
        # NOTE(review): for positive stock and threshold < 1 the left-hand
        # comparison `stock*threshold < stock` is always true, so this chain
        # reduces to `stock < predicted*threshold` — which then prints the
        # "sufficient" message when stock is BELOW the scaled prediction.
        # This looks inverted; confirm the intended condition.
        if data_all[i,0]*threshold<data_all[i,0]<data_all[i,1]*threshold:
            print("库存量较充足，不需要启动报警装置")
        else:
            print("库存量即将耗尽，启动报警装置")



