import csv
import json
import matplotlib.pyplot as plt
import numpy as np
from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from sklearn import preprocessing
from statsmodels.graphics.tsaplots import plot_acf, plot_pacf
import seaborn as sns
# plt.rcParams['font.sans-serif'] = ['KaiTi']  # alternative default CJK font, kept for reference
# plt.rcParams['axes.unicode_minus'] = False  # fixes minus signs rendering as boxes in saved figures
# Font settings used by the PPT-style plots (KaiTi, 20 pt).
font = {'family': 'KaiTi',
        'weight': 'normal',
        'size': 20,
        }
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei so Chinese axis labels render
plt.rcParams['axes.unicode_minus'] = False  # render '-' correctly alongside a CJK font
sns.set_context("paper")

TIME_SLICE_LENGTH = 10  # time-slice length, in minutes
TIME_SLICE_NUM = 100  # time slices contained in one day (6:30 - 23:30)
TIME_STEP = 6  # how many past slices are used to predict the next slice (time steps)
MULTI_STEP_OUT = 6  # number of steps for multi-step forecasting
UNITS_NUM = 70  # `units` argument for the BiLSTM / LSTM layers


def json_to_csv(json_file, csv_file):
    """Convert a JSON flow export into a CSV file.

    Args:
        json_file: path to a JSON file holding a list of [time, count] records
                   (read via read_json_data).
        csv_file: destination CSV path; overwritten if it exists.

    The CSV gets a header row ["time", "flight_num"] followed by one row
    per JSON record.
    """
    rows = read_json_data(json_file)
    # `with` guarantees the handle is closed even if writing raises
    # (the original open()/close() pair leaked the handle on error).
    with open(csv_file, 'w', encoding='utf-8', newline="") as f:
        csv_writer = csv.writer(f)
        csv_writer.writerow(["time", "flight_num"])
        csv_writer.writerows(rows)


def read_json_data(json_file):
    """Load a JSON file and return its records as a list of (time, value) tuples.

    The file is expected to contain a JSON array whose elements are
    indexable sequences; only the first two fields of each element are kept.
    """
    with open(json_file, 'r', encoding='utf8') as fp:
        records = json.load(fp)
    return [(record[0], record[1]) for record in records]

# Figure styled for the paper (black lines, seaborn "paper" context).
def draw_data_paper(pre_data, true_data, picture_path, title):
    """Plot actual vs. predicted passenger flow, save the figure, and show it.

    Args:
        pre_data: sequence of predicted values (dashed black line).
        true_data: sequence of observed values (solid black line).
        picture_path: file path the figure is saved to.
        title: accepted for interface symmetry with draw_data but not drawn
               here — NOTE(review): paper figures appear to rely on captions
               instead; confirm this is intentional.
    """
    figure, axis = plt.subplots(1, 1, figsize=(9, 6))
    axis.plot(true_data, color='k', label='实际值', linestyle='-')
    axis.plot(pre_data, color='k', label='预测值', linestyle='--')
    axis.set_xlabel('时序', fontsize=20)
    axis.set_ylabel('客流量', fontsize=20)
    axis.tick_params(labelsize=20)
    axis.legend(fontsize=20)
    # remove the top and right spines for a cleaner publication look
    sns.despine()
    plt.savefig(picture_path)
    plt.show()

# Figure styled for slides (default matplotlib colors, titled).
def draw_data(pre_data, true_data, picture_path, title):
    """Plot prediction vs. ground truth, save to picture_path, and show.

    Uses the module-level `font` dict (KaiTi, 20 pt) for title and axis labels.
    """
    # plot order matters: prediction takes the first color of the cycle
    plt.plot(pre_data, label='prediction')
    plt.plot(true_data, label='true')
    plt.legend()
    plt.title(title, font)
    plt.xlabel("时序", font)
    plt.ylabel("客流量", font)
    plt.savefig(picture_path)
    plt.show()


def get_train_test_dataset(csv_file, time_slice_num=TIME_SLICE_NUM):
    """Split the passenger-flow column of a CSV into train and test sets.

    The last column of the file is taken as the flow series. The final
    `time_slice_num` rows (one day's worth of slices) form the test set;
    every earlier row forms the training set.

    Returns:
        (train, test) as numpy arrays.
    """
    frame = read_csv(csv_file, header=0)
    flow_column = frame.columns.values.tolist()[-1]  # flow counts live in the last column
    series = frame[flow_column].values
    return series[:-time_slice_num], series[-time_slice_num:]


def get_train_test_time_slice(csv_file, time_slice_num=TIME_SLICE_NUM):
    """Split the timestamp column of a CSV into train and test sets.

    Mirrors get_train_test_dataset but reads the first column (timestamps)
    instead of the last (flow). `time_slice_num` is now a parameter for
    consistency with that sibling; the default preserves the old behavior.

    Returns:
        (train, test) as numpy arrays: all but the last day, and the last day.
    """
    df = read_csv(csv_file, header=0)
    col = df.columns.values.tolist()[0]  # timestamps live in the first column
    data = df[col].values
    train = data[:-time_slice_num]  # every day except the final one
    test = data[-time_slice_num:]   # the final day
    return train, test


# Aggregate card-swipe records into per-day passenger-flow totals.
def get_flow_by_day():
    """Compute daily inbound, outbound, and combined passenger flow.

    Reads the in-card and out-card CSVs, rejoins their train/test halves,
    reshapes each series into 31 rows (one per day), and sums each day.

    Returns:
        (in_flow_by_day, out_flow_by_day, all_flow_by_day) — the first two
        are per-day lists, the third is their element-wise sum as an array.
        NOTE(review): the reshape assumes the CSVs hold exactly 31 days.
    """
    in_train, in_test = get_train_test_dataset("./data/首都国际机场-in-card.csv")
    out_train, out_test = get_train_test_dataset("./data/首都国际机场-out-card.csv")
    # rejoin the split series, then lay them out one day per row
    in_daily = np.concatenate((in_train, in_test)).reshape(31, -1)
    out_daily = np.concatenate((out_train, out_test)).reshape(31, -1)
    in_flow_by_day = [sum(one_day) for one_day in in_daily]
    out_flow_by_day = [sum(one_day) for one_day in out_daily]
    all_flow_by_day = np.array(in_flow_by_day) + np.array(out_flow_by_day)
    return in_flow_by_day, out_flow_by_day, all_flow_by_day


# Re-bin a per-time-slice series into hourly totals.
def get_data_by_hour(data_by_time_slice, time_slice_num=144):
    """Sum consecutive time slices into hourly values.

    Args:
        data_by_time_slice: per-slice series for one day.
        time_slice_num: slices per day; time_slice_num / 24 consecutive
            slices are folded into each hourly value.

    Returns:
        List of hourly sums (a short trailing group is summed as-is).
    """
    per_hour = int(time_slice_num / 24)
    return [sum(data_by_time_slice[start:start + per_hour])
            for start in range(0, len(data_by_time_slice), per_hour)]


# Min-max scaling of a 1-D series into [0, 1].
def normalization(data):
    """Min-max normalize `data` and return it as an (n, 1) numpy array.

    A fresh MinMaxScaler is fitted on the input itself; the scaler is not
    returned, so the inverse transform is not recoverable from the result
    (use scaler.inverse_transform if that is ever needed).
    """
    column = np.array(data)
    column = column.reshape(len(column), 1)  # sklearn expects a 2-D feature matrix
    scaler = preprocessing.MinMaxScaler()
    return scaler.fit_transform(column)


# Autocorrelation plot (partial autocorrelation available via the commented call).
def draw_acf(data):
    """Draw the autocorrelation plot of `data` and display it on screen."""
    plot_acf(data,title="自相关性")
    # plot_pacf(data)  # partial autocorrelation, enable when needed
    plt.show()


def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    """Reframe a time series as a supervised-learning dataset.

    Args:
        data: observation sequence — list, 1-D or 2-D numpy array, or DataFrame.
        n_in: number of lag observations used as input X (t-n_in .. t-1).
        n_out: number of future observations used as output y (t .. t+n_out-1).
        dropnan: whether to drop rows containing NaN introduced by shifting.

    Returns:
        A pandas DataFrame where each row holds n_in historical time slices
        followed by the corresponding n_out target slices. Columns are named
        'var<j>(t-i)', 'var<j>(t)', 'var<j>(t+i)'.
    """
    df = DataFrame(data)
    # Derive the variable count from the DataFrame itself: this handles
    # lists, 1-D numpy arrays, and multi-column inputs uniformly (the
    # previous `data.shape[1]` raised IndexError on 1-D arrays).
    n_vars = df.shape[1]
    cols, names = list(), list()
    # input sequence (t-n_in, ..., t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ..., t+n_out-1)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
    # stitch the shifted frames together column-wise and attach the names
    agg = concat(cols, axis=1)
    agg.columns = names
    # rows at the edges are incomplete after shifting; drop them by default
    if dropnan:
        agg.dropna(inplace=True)
    return agg


# Rescale `data` from one time-slice length to another (both in minutes).
# The two lengths must be integer multiples of each other.
def time_slice_transfer(data, old_time_slice, target_time_slice):
    """Convert a per-slice series between time-slice granularities.

    Args:
        data: per-slice values at the old granularity.
        old_time_slice: current slice length, in minutes.
        target_time_slice: desired slice length, in minutes.

    Returns:
        numpy array at the target granularity:
        - finer target (old > target): each value is repeated old/target
          times (preserves the original expand behavior — values are
          duplicated, not divided).
        - coarser target (target > old): every target/old consecutive
          values are summed into one (previously unimplemented: the
          function silently returned an empty result).
        - equal lengths: the data unchanged, as an array (previously
          returned an empty list).

    Raises:
        ValueError: when shrinking and len(data) is not a multiple of the
            target/old ratio.
    """
    values = np.asarray(data)
    if old_time_slice == target_time_slice:
        return values
    if old_time_slice > target_time_slice:
        # expand: replicate each coarse value across its finer slices;
        # the original built a float array via np.append, so keep float dtype
        n = int(old_time_slice / target_time_slice)
        return np.repeat(values.astype(float), n)
    # shrink: fold every n consecutive fine slices into one coarse slice
    n = int(target_time_slice / old_time_slice)
    if len(values) % n != 0:
        raise ValueError("data length must be a multiple of target/old ratio")
    return values.reshape(-1, n).sum(axis=1)


if __name__ == '__main__':
    # One-off conversion of the raw JSON exports to CSV (already done; kept for reference):
    # json_to_csv("./2019workday/20191214首都国际机场-in-card_train.json", "./2019workday/20191214首都国际机场-in-card_train.csv")
    # json_to_csv("./2019workday/20191214首都国际机场-out-card_train.json", "./2019workday/20191214首都国际机场-out-card_train.csv")

    # Load the combined 2019 dataset; the first column is used as the index.
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)

    # Visual check of autocorrelation in the inbound-flow series.
    draw_acf(multi_dataset['in_flow'].values)
    # Scratch code for trying series_to_supervised / the train-test split:
    # values = [x for x in range(10)]
    # data = series_to_supervised(values)
    # print(data)
    # get_train_test_time_slice("./data/首都国际机场-in-card.csv")

