import numpy as np
from pandas import read_csv, DataFrame
from sklearn.preprocessing import MinMaxScaler
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Flatten, Conv1D, MaxPooling1D
from tensorflow.python.keras.utils.np_utils import to_categorical

from util import TIME_STEP, series_to_supervised


# time_step: length of the input window; n_features: features per time step
def oned_cnn_model(time_step, n_features, X, y, epoch_num, verbose_set):
    """Build, compile and train a 1-D CNN regression model.

    Parameters
    ----------
    time_step : int
        Number of time steps per input sample (input window length).
    n_features : int
        Number of features (channels) per time step.
    X : ndarray
        Training inputs, expected shape (samples, time_step, n_features).
    y : ndarray
        Training targets, one scalar per sample.
    epoch_num : int
        Number of training epochs.
    verbose_set : int
        Keras verbosity level forwarded to ``model.fit``.

    Returns
    -------
    tuple
        ``(model, history)`` — the fitted ``Sequential`` model and the
        Keras ``History`` object from training.
    """
    model = Sequential()

    model.add(Conv1D(filters=64, kernel_size=2, activation='relu',
                     strides=1, padding='valid', data_format='channels_last',
                     input_shape=(time_step, n_features)))

    model.add(MaxPooling1D(pool_size=2, strides=None, padding='valid',
                           data_format='channels_last'))

    model.add(Flatten())

    model.add(Dense(units=50, activation='relu',
                    use_bias=True, kernel_initializer='glorot_uniform',
                    bias_initializer='zeros'))

    # Single linear output unit: a regression head.
    model.add(Dense(units=1))

    # NOTE(review): 'accuracy' is not a meaningful metric for MSE regression,
    # but it is kept because __main__ reads history.history['accuracy'].
    # The removed kwargs (loss_weights, sample_weight_mode, weighted_metrics,
    # target_tensors) were all None (their defaults); sample_weight_mode and
    # target_tensors no longer exist in current TF 2.x and would crash there.
    model.compile(optimizer='adam', loss='mse', metrics=['accuracy'])

    # Bug fix: model.summary() prints the summary itself and returns None,
    # so the previous print('\n', model.summary()) emitted a spurious "None".
    model.summary()

    history = model.fit(X, y, batch_size=32, epochs=epoch_num, verbose=verbose_set)

    return model, history

def getDate():
    """Load the airport passenger-flow CSV and return supervised train/test
    splits shaped for the 1-D CNN.

    Returns
    -------
    tuple
        ``(train_X, train_y, test_X, test_y)`` where the ``*_X`` arrays have
        shape (samples, TIME_STEP, n_features) and the ``*_y`` arrays are the
        1-D target column (next-step ``in_flow``).
    """
    # Switching between the 2019 and 2020 data only requires changing the
    # next two lines (and uncommenting the alternate pair).
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days contained in the data
    # multi_dataset = read_csv('./data/多源数据总表.csv', header=0, index_col=None)
    # day_num = 31  # number of days contained in the data

    dataset = DataFrame()
    # Features: in_flow (passengers leaving the airport), flight passenger
    # counts, hour, weather and workday; the prediction target is in_flow.
    dataset['in_flow'] = multi_dataset['in_flow']
    dataset['arr_ALDT_passenger'] = multi_dataset['arr_ALDT_passenger']
    dataset['arr_SIBT_passenger'] = multi_dataset['arr_SIBT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']

    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    # TODO(review): the original computed to_categorical(dataset['hour'])
    # (one-hot encoding) but never used the result; the dead call was
    # removed. Re-add and join it to `dataset` if one-hot inputs are wanted.

    # Replace NaN with 0.
    dataset.fillna(0, inplace=True)

    # Ensure all data is float32.
    values = dataset.values.astype('float32')

    # Normalise all features to [0, 1].
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # Frame as a supervised-learning problem: TIME_STEP lagged observations
    # predict the next step.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the time-t columns we do not want to predict (everything except
    # the first feature, in_flow, which stays as the last/target column).
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6, -7]], axis=1, inplace=True)

    # Split into train (first day_num - 1 days) and test (last day);
    # assumes 100 time slices per day — TODO confirm against the CSV.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Split into inputs and the single target column.
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]

    # Reshape into 3D [samples, time steps, features].
    # Bug fix: the original `return` came BEFORE this reshape, making it
    # unreachable dead code, so callers received 2-D inputs while the
    # Conv1D model expects 3-D input.
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, int(train_X.shape[1] / TIME_STEP)))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, int(test_X.shape[1] / TIME_STEP)))

    return train_X, train_y, test_X, test_y

if __name__ == '__main__':
    # Toy smoke-test data: two parallel univariate input sequences.
    train_seq1 = [10, 20, 30, 40, 50, 60, 70, 80, 90]
    train_seq2 = [15, 25, 35, 45, 55, 65, 75, 85, 95]
    time_step = TIME_STEP

    epoch_num = 1000
    verbose_set = 0  # silent training

    # NOTE(review): `split_sequences` is not defined in this file and is not
    # imported (only TIME_STEP and series_to_supervised come from util), so
    # running this script raises NameError here. It must be defined or
    # imported before this works.
    train_X, train_y, n_features = split_sequences(train_seq1, train_seq2, time_step)

    # Predict
    # NOTE(review): test_X holds 3 rows of 2 values; the reshape below only
    # succeeds if time_step * n_features == 6 — confirm TIME_STEP in util.
    test_X = np.array([[80, 85], [90, 95], [100, 105]])
    test_X = test_X.reshape((1, time_step, n_features))

    model, history = oned_cnn_model(time_step, n_features, train_X, train_y, epoch_num, verbose_set)
    # Report mean training accuracy/loss across all epochs.
    print('\ntrain_acc:%s' % np.mean(history.history['accuracy']), '\ntrain_loss:%s' % np.mean(history.history['loss']))
    yhat = model.predict(test_X, verbose=0)
    print('\nyhat:', yhat)