# Load the usual data-analysis libraries
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.metrics import mean_absolute_error,mean_squared_error
from sklearn.preprocessing import MinMaxScaler
import matplotlib.pyplot as plt
import warnings
import time
from sklearn import metrics
warnings.filterwarnings('ignore')


# Reset the default graph so re-running this script (e.g. in a notebook) starts clean.
tf.reset_default_graph()
# Model hyper-parameters / constants
rnn_unit = 5     # hidden layer units
timeStep = 6     # max length of the padded yearly quantity sequence
input_size= 1    # features per time step
output_size=1    # predicted values per sample
lr=0.005       # learning rate
onehotSize = 1000 + 12 + 31  # one-hot width: zonecode(1000) + month(12) + day(31)



# Convert the date string into numeric components
def getday(x):
    """Return the day-of-month (int) from a 'YYYY-MM-DD' style string."""
    return int(x.split("-")[2])

def getmonth(x):
    """Return the month (int) from a 'YYYY-MM-DD' style string."""
    # Fix: the original converted to int twice (int(...) then int(month)).
    return int(x.split("-")[1])

def getyear(x):
    """Return the year (int) from a 'YYYY-MM-DD' style string."""
    # Fix: the original named this local `month` (copy-paste from getmonth)
    # and converted to int twice.
    return int(x.split("-")[0])

# Data loading and sample construction
def get_data():
    """Load Data/QuantityPredict.csv and build the train/test sample tables.

    Returns a 4-tuple of DataFrames:
        (train, october_test, november_test, eval_set)
    Each row carries a one-hot (zonecode, month, day) feature vector, a
    padded per-year quantity sequence with its true length, a recent-history
    sequence, and the prediction target `label`.
    """

    QuantityData = pd.read_csv("Data/QuantityPredict.csv", encoding="GBK")
    # QuantityData = QuantityData.loc[QuantityData["zonecode"] < 5]
    # print(QuantityData.columns.values.tolist())

    # Numeric date columns derived from the "date" string column.
    QuantityData["day"] = QuantityData["date"].apply(lambda x:getday(x))
    QuantityData["month"] = QuantityData["date"].apply(lambda x: getmonth(x))
    QuantityData["year"] = QuantityData["date"].apply(lambda x: getyear(x))

    def makeTimeSequenceData(x):
        """Build one sample from all rows sharing (zonecode, month, day).

        The group's quantities, ordered by year, form the input sequence;
        the last (most recent year's) quantity becomes the target `label`.
        """
        t = time.time()  # kept only for the commented-out timing print below
        x = x.sort_values(by=['year'])
        zonecode = x["zonecode"].tolist()[0]
        month = x["month"].tolist()[0]
        day = x["day"].tolist()[0]

        # One-hot encode zonecode (1000 slots), month (12) and day (31).
        zoneCodeOnehot = [0.0] * 1000
        zoneCodeOnehot[int(zonecode) - 1] = 1.0
        monthOnehot = [0.0] * 12
        monthOnehot[int(month) - 1] = 1.0
        dayOnehot = [0.0] * 31
        dayOnehot[int(day) - 1] = 1.0
        onehotFeature = zoneCodeOnehot + monthOnehot + dayOnehot
        quantityTimeList = x["quantity"].tolist()
        timesquence = []
        for oneData in quantityTimeList[0:-1]:
            timesquence.append([oneData])
        label = quantityTimeList[-1]
        oneSequence = len(timesquence)  # true (unpadded) sequence length
        # Pad the sequence up to timeStep with a -1 sentinel.
        for padding in range(0, timeStep - len(timesquence)):
            timesquence.append([-1])

        # for i in range(len(quantityTimeList) - time_step - 1):
        #     # if i % batch_size == 0:
        #     sequenceData = []
        #     for j in range(i,i + time_step):
        #         quantityValue = quantityTimeList[j]
        #         dateValue  = dateTimeList[j]
        #         sequenceData.append([quantityValue,dateValue,zonecode])

        resultDic = {"onehotFeature": onehotFeature, "timesquence": timesquence,
                     "zonecode": zonecode, "label": label,
                     "oneSequence": oneSequence, "month": month, "day": day}
        # print(resultDic)
        # print(time.time() - t)
        return pd.DataFrame([resultDic])

    # Per zonecode: for each distinct (month, day), a recent-history window.
    # NOTE(review): the original comment said "last 20 days", but the slice
    # below is quantityTimeList[i+45:i+55] (10 values, offset 45) — confirm
    # the intended window/offset.
    def makeTimeOrderData(x):

        # Sort newest-first by (year, month, day).
        zonecode = x["zonecode"].tolist()[0]
        x = x.sort_values(by=['year','month','day'],ascending = False)
        quantityTimeList = x["quantity"].tolist()
        monthlist = x["month"].tolist()
        daylist = x["day"].tolist()
        setJudgeRepeatData = set()
        resultList = []

        for i in range(0,len(quantityTimeList)):
            dateNum = monthlist[i]* 100 + daylist[i]
            # Stop at the first repeated (month, day): we have wrapped into a
            # previous year, so every later date was already produced.
            if dateNum in setJudgeRepeatData:
                break
            else:
                setJudgeRepeatData.add(dateNum)
                # History window taken 45..55 positions further back.
                timeOrderSeq = []
                for oneData in quantityTimeList[i+45:i+55]:
                    timeOrderSeq.append([oneData])

                date = "2017-" + str(monthlist[i]) + "-" + str(daylist[i])
                resultList.append({"zonecode":zonecode,"month":monthlist[i],
                                   "day":daylist[i],"timeOrderSeq":timeOrderSeq,
                                   "date":date})

        return pd.DataFrame(resultList)

    quantitySequenceData_order = QuantityData.groupby(["zonecode"],as_index = False).apply(lambda x:makeTimeOrderData(x))
    quantitySequenceData_All = QuantityData.groupby(["zonecode","month","day"],as_index = False).apply(lambda x:makeTimeSequenceData(x))
    quantitySequenceData_All = pd.merge(quantitySequenceData_All, quantitySequenceData_order,
                                        how='inner', on=["zonecode","month","day"])

    print(quantitySequenceData_All.columns.values.tolist())
    # Train / test split by calendar window.
    November = (quantitySequenceData_All.month == 11) & (quantitySequenceData_All.day >= 5) & (quantitySequenceData_All.day <= 25)
    quantitySequenceData_November = quantitySequenceData_All.loc[November].reset_index(drop=True)
    October = ((quantitySequenceData_All.month == 10) & (quantitySequenceData_All.day > 15)) | \
              ((quantitySequenceData_All.month == 11) & (quantitySequenceData_All.day >= 1) \
              & (quantitySequenceData_All.day < 5))
    quantitySequenceData_October = quantitySequenceData_All.loc[October].reset_index(drop=True)

    quantitySequenceData_Trian = quantitySequenceData_All.loc[(~November) & (~October)].reset_index(drop=True)

    # Evaluation slice: Oct 15 onwards through December.
    quantitySequenceData_eval =  quantitySequenceData_All.loc[(((quantitySequenceData_All.month == 10) & (quantitySequenceData_All.day > 14))
               | (quantitySequenceData_All.month == 11) | (quantitySequenceData_All.month == 12))]

    return quantitySequenceData_Trian,quantitySequenceData_October,\
           quantitySequenceData_November,quantitySequenceData_eval


# Build the feature table used for the final forecast
def returnPredictData():
    """Build the feature table for the final (2018) forecast window.

    Mirrors get_data()'s feature construction, except the whole quantity
    sequence is kept as input (no label is held out) and the dates are
    generated forward for the forecast window (late Oct through early Dec).
    Returns a single merged DataFrame.
    """

    QuantityData = pd.read_csv("Data/QuantityPredict.csv", encoding="GBK")
    # QuantityData = QuantityData.loc[QuantityData["zonecode"] < 5]

    # Numeric date columns derived from the "date" string column.
    QuantityData["day"] = QuantityData["date"].apply(lambda x: getday(x))
    QuantityData["month"] = QuantityData["date"].apply(lambda x: getmonth(x))
    QuantityData["year"] = QuantityData["date"].apply(lambda x: getyear(x))

    # Rows in the forecast window: late October through December.
    predict = (((QuantityData.month == 10) & (QuantityData.day > 23))
               | (QuantityData.month == 11) | (QuantityData.month == 12))

    QuantityData_predict = QuantityData.loc[predict]

    def makeTimeSequenceData(x):
        """Per-(zonecode, month, day) sample; like get_data()'s version but
        keeps the whole sequence — nothing is held out as a label."""
        t = time.time()  # kept for symmetry with get_data(); otherwise unused
        x = x.sort_values(by=['year'])
        zonecode = x["zonecode"].tolist()[0]
        month = x["month"].tolist()[0]
        day = x["day"].tolist()[0]

        # One-hot encode zonecode (1000 slots), month (12) and day (31).
        zoneCodeOnehot = [0.0] * 1000
        zoneCodeOnehot[int(zonecode) - 1] = 1.0
        monthOnehot = [0.0] * 12
        monthOnehot[int(month) - 1] = 1.0
        dayOnehot = [0.0] * 31
        dayOnehot[int(day) - 1] = 1.0
        onehotFeature = zoneCodeOnehot + monthOnehot + dayOnehot
        quantityTimeList = x["quantity"].tolist()
        timesquence = []
        for oneData in quantityTimeList:
            timesquence.append([oneData])
        oneSequence = len(timesquence)  # true (unpadded) sequence length
        # Pad the sequence up to timeStep with a -1 sentinel.
        for padding in range(0, timeStep - len(timesquence)):
            timesquence.append([-1])

        resultDic = {"onehotFeature": onehotFeature, "timesquence": timesquence,
                     "zonecode": zonecode, "oneSequence": oneSequence,
                     "month": month, "day": day}

        return pd.DataFrame([resultDic])


    # Build a 10-step recent-history window for each generated forecast date.
    def makeTimeOrderData(x):

        # Sort newest-first by (year, month, day).
        zonecode = x["zonecode"].tolist()[0]
        x = x.sort_values(by = ['year', 'month', 'day'], ascending=False)
        quantityTimeList = x["quantity"].tolist()

        resultList = []
        i = 0

        # datenum encodes month*100 + day, counting down from 12-07 to 10-24.
        for datenum in range(1207,1023,-1):

            # Skip non-calendar day numbers between month boundaries
            # (11-31..11-99 and 10-32..10-99).
            if (datenum >= 1131 and datenum < 1201)  or (datenum>=1032 and datenum <1101):
                continue

            month = datenum // 100
            day   = datenum % 100

            # The 10 most-recent observed quantities, shifted by one position
            # for each successive forecast date.
            timeOrderSeq = []
            for oneData in quantityTimeList[i:i + 10]:
                timeOrderSeq.append([oneData])

            i = i + 1
            date = "2018-" + str(month) + "-" + str(day)
            resultList.append({"zonecode": zonecode, "month": month,
                               "day": day, "timeOrderSeq": timeOrderSeq,
                               "date":date})

        return pd.DataFrame(resultList)

    quantitySequenceData_order = QuantityData.groupby(["zonecode"], as_index=False).apply(
        lambda x: makeTimeOrderData(x))
    quantitySequenceData_All = QuantityData_predict.groupby(["zonecode", "month", "day"], as_index=False).apply(
        lambda x: makeTimeSequenceData(x))
    quantitySequenceData_All = pd.merge(quantitySequenceData_All, quantitySequenceData_order,
                                        how='inner', on=["zonecode", "month", "day"])

    print(quantitySequenceData_All.columns.values)
    return quantitySequenceData_All

#—————————————————— Neural-network definition ——————————————————
# LSTM network structure


def lstm(X,X_order,OneHot,sequence_length):
    """Build the TF1 prediction graph.

    Args:
        X: float32 [batch, time, input_size] — padded yearly quantity sequences.
        X_order: float32 [batch, time, input_size] — recent-history sequences.
        OneHot: float32 [batch, onehotSize] — one-hot (zonecode, month, day).
        sequence_length: per-row true (unpadded) length of X.

    Returns:
        [batch, 1] predicted quantity tensor (linear output).
    """
    batch_size = tf.shape(X)[0]
    # time_step  = tf.shape(X)[1]
    # lstm_input = tf.reshape(X,[-1,input_size])  # flatten to 2-D for the dense layer
    #
    # out_net = tf.contrib.layers.fully_connected(
    #     inputs=lstm_input,
    #     num_outputs=rnn_unit,
    #     activation_fn=tf.nn.relu,
    #     weights_initializer=tf.contrib.layers.xavier_initializer(),
    # )

    # (earlier experiment) an extra batch-norm layer
    # out_net = tf.layers.batch_normalization(out_net)

    # lstm_input=tf.reshape(out_net,[-1,time_step,rnn_unit])  # back to 3-D for the LSTM cell
    with tf.variable_scope("year_Seq"):

        # LSTM over the (variable-length) yearly sequence.
        rnn_unit_year = 5
        cell_year = tf.contrib.rnn.LSTMCell(rnn_unit_year,forget_bias = 0.9,activation=tf.nn.relu)
        init_state = cell_year.zero_state(batch_size,dtype = tf.float32)
        _,lstm_final_states_year_seq = tf.nn.dynamic_rnn(cell_year, X,
                                                         initial_state = init_state,
                                                         dtype = tf.float32,
                                                        sequence_length=sequence_length)

        # final_states.h is the last cell's output; feed it to the dense layers.
        lstm_final_states_year_seq = tf.reshape(lstm_final_states_year_seq.h,[-1,rnn_unit_year])

    with tf.variable_scope("day_Seq"):

        # LSTM over the recent-history sequence (fixed length, no masking).
        rnn_unit_day = 5
        cell_day = tf.contrib.rnn.LSTMCell(rnn_unit_day, forget_bias=0.9, activation=tf.nn.relu)
        init_state = cell_day.zero_state(batch_size, dtype=tf.float32)
        _, lstm_final_states_day_seq = tf.nn.dynamic_rnn(cell_day, X_order,
                                                          initial_state=init_state,
                                                          dtype=tf.float32)
        # dynamic_rnn returns (per-step outputs, final_states); final_states.h
        # is the last cell's output.
        lstm_final_states_day_seq = tf.reshape(lstm_final_states_day_seq.h,[-1,rnn_unit_day])

    # Concatenate the one-hot features with both LSTM final states.
    final_input = tf.concat([OneHot,lstm_final_states_year_seq],1)
    final_input = tf.concat([final_input,lstm_final_states_day_seq],1)



    # First fully-connected layer.
    pred = tf.contrib.layers.fully_connected(
        inputs=final_input,
        num_outputs = 100,
        activation_fn = tf.nn.relu,
        weights_initializer=tf.contrib.layers.xavier_initializer()
    )
    pred = tf.layers.batch_normalization(pred)

    # Second fully-connected layer.
    pred = tf.contrib.layers.fully_connected(
        inputs=pred,
        num_outputs = 10,
        activation_fn = tf.nn.relu,
        weights_initializer=tf.contrib.layers.xavier_initializer()
    )


    # Final linear output layer (one predicted value per sample).
    pred = tf.contrib.layers.fully_connected(inputs=pred,
                                             num_outputs = 1,
                                             activation_fn = None,
                            weights_initializer=tf.contrib.layers.xavier_initializer()
    )

    return pred

# Convert DataFrame columns into the ndarray feed format
def getndarray(data):
    """Turn a samples DataFrame into the ndarray inputs the network feeds on.

    Returns (sequences, onehots, labels, lengths, ordered) where labels is
    reshaped to [-1, output_size].
    """
    sequences = np.array(data["timesquence"].tolist())
    onehots = np.array(data["onehotFeature"].tolist())
    labels = np.array(data["label"].tolist()).reshape([-1, output_size])
    lengths = np.array(data["oneSequence"].tolist())
    ordered = np.array(data["timeOrderSeq"].tolist())
    return sequences, onehots, labels, lengths, ordered

# —————————————————— Model training ——————————————————
def train_lstm(batch_size=8000, time_step=5):
    """Train the LSTM model and write the prediction CSVs.

    Builds the graph, trains for a fixed number of epochs on get_data()'s
    training split, reports RMSE on the October/November test splits every
    10 epochs, then writes "predict.csv" (2018 forecast) and "real.csv"
    (evaluation slice) as side effects. Returns None.
    """

    X = tf.placeholder(tf.float32, shape=[None, None, input_size])
    X_Order = tf.placeholder(tf.float32, shape=[None, None, input_size])
    OneHot = tf.placeholder(tf.float32, shape=[None, onehotSize])
    Y = tf.placeholder(tf.float32, shape=[None, output_size])
    # NOTE(review): tf.nn.dynamic_rnn documents sequence_length as an
    # int32/int64 vector; this placeholder is float32 — confirm it is accepted.
    sequence_length = tf.placeholder(tf.float32, shape=[None,])
    trainData,OctTest,NovTest,evaltest = get_data()
    train_x,train_onehot,train_y,train_sequence,train_order = getndarray(trainData)
    #tempresult = lstmpart(X)
    pred = lstm(X,X_Order,OneHot,sequence_length)
    # Mean-squared-error loss on the flattened prediction/target.
    loss = tf.reduce_mean(tf.square(tf.reshape(pred, [-1]) - tf.reshape(Y, [-1])))
    train_op = tf.train.AdamOptimizer(lr).minimize(loss)
    size  = train_x.shape[0]
    #size = len(train_x)
    # NOTE(review): if size < batch_size, num_round is 0 and loss_ below is
    # never bound before the print — verify the dataset is large enough.
    num_round = size // batch_size

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Number of passes over the training data.
        iter_time = 200
        for i in range(iter_time):

            for step in range(num_round):

                # Full batches except the last round, which takes the remainder.
                if step < num_round - 1:
                    idx = list(range(step * batch_size, step * batch_size + batch_size))
                    liststart = step * batch_size
                    listend = step * batch_size + batch_size
                else:
                    idx = list(range(step * batch_size, size))
                    liststart = step * batch_size
                    listend = -1
                    # idx = np.random.randint(0, x_train.shape[0]-1, batch_size )
                #oneBatchTrain = train_x[liststart:listend]
                oneBatchTrain = train_x[idx,:,:]
                feed_dict = {X: oneBatchTrain,
                             X_Order:train_order[idx,:,:],
                             OneHot:train_onehot[idx,:],
                             Y: train_y[idx,:],
                             sequence_length:train_sequence[idx]}
                #predre = sess.run(pred, feed_dict= feed_dict)
                _, loss_ = sess.run([train_op, loss], feed_dict= feed_dict)
                #tempresult1 = sess.run(tempresult, feed_dict= feed_dict)
                #print(tf.shape(tempresult1))

            if i % 10 == 0:
                print('iter:', i, 'loss:', loss_)
                #### Evaluate RMSE on the held-out test splits ####

                Oct_x, Oct_onehot, Oct_y, Oct_sequence,Oct_order= getndarray(OctTest)
                feed_dict = {X: Oct_x,
                             X_Order: Oct_order,
                             OneHot:  Oct_onehot,
                             sequence_length:Oct_sequence}

                pre_oct = sess.run(pred, feed_dict= feed_dict)
                pre_oct = np.reshape(pre_oct,-1)
                Oct_y   = np.reshape(Oct_y,-1)
                #rmse_oct = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(tf.reshape(pre_oct, [-1]) - tf.reshape(Oct_y, [-1])))))
                rmse_oct = np.sqrt(metrics.mean_squared_error(pre_oct, Oct_y))
                print("October RMSE: " + str(rmse_oct))
                Nov_x, Nov_onehot, Nov_y, Nov_sequence,Nov_Order = getndarray(NovTest)
                feed_dict = {X: Nov_x,
                             X_Order: Nov_Order,
                             OneHot:  Nov_onehot,
                             sequence_length:Nov_sequence}
                pre_nov = sess.run(pred, feed_dict= feed_dict)
                #rmse_nov = tf.sqrt(tf.reduce_mean(tf.square(tf.subtract(tf.reshape(pre_nov, [-1]) - tf.reshape(Nov_y, [-1])))))
                pre_nov = np.reshape(pre_nov, -1)
                Nov_y = np.reshape(Nov_y, -1)
                rmse_nov = np.sqrt(metrics.mean_squared_error(pre_nov, Nov_y))

                print("November RMSE: " + str(rmse_nov))


                # for step in range(len(test_x)):
                #     prob = sess.run(pred, feed_dict={X: [test_x[step]]})
                #     predict = prob.reshape((-1))
                #     test_predict.extend(predict)
                #
                # test_predict = scaler_for_y.inverse_transform(test_predict)
                # test_y = scaler_for_y.inverse_transform(test_y)
                # rmse = np.sqrt(mean_squared_error(test_predict, test_y))
                # mae = mean_absolute_error(y_pred=test_predict, y_true=test_y)
                # print('mae:', mae, '   rmse:', rmse)

                #print("hello")

        # Final forecast for 2018, written to predict.csv.
        predict_test = returnPredictData()
        predict_x = np.array(predict_test["timesquence"].tolist())
        predict_onehot = np.array(predict_test["onehotFeature"].tolist())

        predict_sequence = np.array(predict_test["oneSequence"].tolist())
        predict_order = np.array(predict_test["timeOrderSeq"].tolist())

        feed_dict = {X: predict_x,X_Order: predict_order,
                     OneHot: predict_onehot,sequence_length: predict_sequence}

        predict_test["quantity"] = sess.run(pred, feed_dict=feed_dict)
        predict_test = predict_test[["quantity","zonecode","date"]]
        predict_test.to_csv("predict.csv",index=False)

        # Predictions on the real evaluation slice, written to real.csv.
        predict_x = np.array(evaltest["timesquence"].tolist())
        predict_onehot = np.array(evaltest["onehotFeature"].tolist())

        predict_sequence = np.array(evaltest["oneSequence"].tolist())
        predict_order = np.array(evaltest["timeOrderSeq"].tolist())

        feed_dict = {X: predict_x,X_Order: predict_order,
                     OneHot: predict_onehot,sequence_length: predict_sequence}

        evaltest["quantity"] = sess.run(pred, feed_dict=feed_dict)
        evaltest = evaltest[["quantity","zonecode","date"]]
        evaltest.to_csv("real.csv", index=False)

# Script entry point. The standalone returnPredictData() call that used to
# run here was redundant: its result was discarded, and train_lstm() already
# invokes it for the final forecast — it only cost an extra pass over the CSV.
if __name__ == "__main__":
    train_lstm()

