import datetime
import numpy as np
import pymysql
from sklearn.preprocessing import OneHotEncoder, MinMaxScaler
from sklearn import ensemble
import pickle

# Module-level caches of trained models, filled in by rfpredict_station /
# rfpredict_line and pickled by savemodel.
# Layout: {id: {"0": {slot_timedelta: model}, "1": {slot_timedelta: model}}}
# where "0" holds weekday models and "1" holds weekend models (see day_type).
station_model = {}
line_model = {}


def savemodel():
    """Train flow-prediction models for every station and line, then pickle them.

    Reads the distinct station IDs from table `station` and line IDs from
    table `lineflow`, trains one RandomForest model per (ID, day type,
    5-minute time slot) via rfpredict_station / rfpredict_line, and dumps
    {"station": station_model, "line": line_model} to disk with pickle.
    """
    conn = pymysql.connect(host='localhost',
                           user='root',
                           passwd='q19723011',
                           port=3306,
                           db='month6',
                           charset='utf8',
                           )
    try:
        cur = conn.cursor()
        try:
            cur.execute("""select *from station""")
            datas = list(cur.fetchall())
            # Column 0 holds the station ID; dedupe across all rows.
            stationIDs = np.unique(np.array(datas)[:, 0:1])
            print(len(stationIDs))

            cur.execute("""select *from lineflow""")
            datas = list(cur.fetchall())
            lineIDs = np.unique(np.array(datas)[:, 0:1])
        finally:
            cur.close()  # release the cursor even if a query fails
    finally:
        # Plain SELECTs need no commit; just make sure the connection is
        # closed on every path (the original leaked it on exception).
        conn.close()

    for stationID in stationIDs:
        print("站点", stationID)
        # "0" = weekday models, "1" = weekend models (see day_type).
        station_model[stationID] = {"0": {}, "1": {}}
        rfpredict_station(stationID)
        print(station_model[stationID])
    for lineID in lineIDs:
        print("线路", lineID)
        line_model[lineID] = {"0": {}, "1": {}}
        rfpredict_line(lineID)
        print(line_model[lineID])
    svr = {"station": station_model, "line": line_model}
    # BUG FIX: the original non-raw literal contained "\r" (carriage return)
    # inside "...saveModels\rf.pkl"; a raw string preserves the backslash.
    with open(r'D:\PythonProject\graduationProject\saveModels\rf.pkl', 'wb') as f:
        pickle.dump(svr, f)


def rfpredict_station(stationID):
    """Train models for one station over every 5-minute slot of the day.

    Iterates slots from 07:00 to 23:00 inclusive; for each slot, fetches the
    preceding 30 minutes of flow data and fits one model per day type,
    storing them in the module-level dict as
    station_model[stationID][day_type][slot_timedelta].

    Args:
        stationID: station identifier; must already have an entry in
            station_model with empty "0" and "1" sub-dicts (savemodel
            creates it).
    """
    m = datetime.timedelta(hours=7)
    while m <= datetime.timedelta(hours=23):
        # station_data[0] = weekday samples, station_data[1] = weekend samples.
        station_data = get_station_data(m, stationID)
        station_model[stationID]["0"][m] = rf_train(station_data[0])
        station_model[stationID]["1"][m] = rf_train(station_data[1])
        m += datetime.timedelta(minutes=5)


def rfpredict_line(lineID):
    """Train models for one line over every 5-minute slot of the day.

    Iterates slots from 07:00 to 23:00 inclusive; for each slot, fetches the
    preceding 30 minutes of flow data and fits one model per day type,
    storing them in the module-level dict as
    line_model[lineID][day_type][slot_timedelta].

    Args:
        lineID: line identifier; must already have an entry in line_model
            with empty "0" and "1" sub-dicts (savemodel creates it).
    """
    m = datetime.timedelta(hours=7)
    while m <= datetime.timedelta(hours=23):
        # line_data[0] = weekday samples, line_data[1] = weekend samples.
        line_data = get_line_data(m, lineID)
        line_model[lineID]["0"][m] = rf_train(line_data[0])
        line_model[lineID]["1"][m] = rf_train(line_data[1])
        m += datetime.timedelta(minutes=5)


def day_type(day):
    """Classify a 1-based weekday number into a day-type code.

    Args:
        day: integer weekday, 1 (Monday) through 7 (Sunday).

    Returns:
        0 for a weekday (1-5), 1 for a weekend day.
    """
    # The original had an unreachable trailing `return 1` after the
    # if/else (both branches already returned); removed.
    return 0 if 1 <= day <= 5 else 1


def get_station_data(time, stationID):
    """Fetch one station's flow records for the 30 minutes ending at `time`,
    split by day type.

    Args:
        time: datetime.timedelta offset from midnight marking the window end.
        stationID: station identifier (convertible to int).

    Returns:
        A two-element list [weekday_samples, weekend_samples]; each sample is
        a list of 7 flow values (column 3 of seven consecutive rows:
        6 lagged features + 1 label, as consumed by rf_train).
    """
    conn = pymysql.connect(host='localhost',
                           user='root',
                           passwd='q19723011',
                           port=3306,
                           db='month6',
                           charset='utf8',
                           )
    dataProcess = [[], []]
    # str(timedelta) is "H:MM:SS"; joining the pieces yields e.g. "63000",
    # matching the compact numeric time encoding used in the table.
    start_time = "".join(str(time - datetime.timedelta(minutes=30)).split(':')[:3])
    # NOTE(review): values are spliced in by %-formatting, not bound
    # parameters. Kept byte-identical because the unquoted numeric literals
    # may rely on MySQL implicit-conversion semantics -- confirm the column
    # types before switching to parameterized queries.
    sql = "select * from station_time_flow where time >= %s and  time <= %s and stationID = %d" \
          % (start_time, "".join(str(time).split(':')[:3]), int(stationID))
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            datas = list(cur.fetchall())
        finally:
            cur.close()
    finally:
        # A plain SELECT needs no commit; ensure the connection is closed
        # on every path (the original leaked it on exception).
        conn.close()
    # Rows arrive in groups of 7 consecutive 5-minute readings; column 1 is
    # the date, column 3 the flow value. The original duplicated the same
    # data_list construction in both branches -- build it once.
    for i in range(0, len(datas), 7):
        data_list = [datas[i + k][3] for k in range(7)]
        if day_type(datas[i][1].weekday() + 1) == 0:
            dataProcess[0].append(data_list)  # weekday sample
        else:
            dataProcess[1].append(data_list)  # weekend sample
    return dataProcess


def get_line_data(time, lineID):
    """Fetch one line's flow records for the 30 minutes ending at `time`,
    split by day type.

    Args:
        time: datetime.timedelta offset from midnight marking the window end.
        lineID: line identifier (convertible to int).

    Returns:
        A two-element list [weekday_samples, weekend_samples]; each sample is
        a list of 7 flow values (column 3 of seven consecutive rows:
        6 lagged features + 1 label, as consumed by rf_train).
    """
    conn = pymysql.connect(host='localhost',
                           user='root',
                           passwd='q19723011',
                           port=3306,
                           db='month6',
                           charset='utf8',
                           )
    dataProcess = [[], []]
    # str(timedelta) is "H:MM:SS"; joining the pieces yields e.g. "63000",
    # matching the compact numeric time encoding used in the table.
    start_time = "".join(str(time - datetime.timedelta(minutes=30)).split(':')[:3])
    # NOTE(review): values are spliced in by %-formatting, not bound
    # parameters. Kept byte-identical because the unquoted numeric literals
    # may rely on MySQL implicit-conversion semantics -- confirm the column
    # types before switching to parameterized queries.
    sql = "select * from line_time_flow where time >= %s and  time <= %s and lineID = %d" \
          % (start_time, "".join(str(time).split(':')[:3]), int(lineID))
    try:
        cur = conn.cursor()
        try:
            cur.execute(sql)
            datas = list(cur.fetchall())
        finally:
            cur.close()
    finally:
        # A plain SELECT needs no commit; ensure the connection is closed
        # on every path (the original leaked it on exception).
        conn.close()
    # Rows arrive in groups of 7 consecutive 5-minute readings; column 1 is
    # the date, column 3 the flow value. The original duplicated the same
    # data_list construction in both branches -- build it once.
    for i in range(0, len(datas), 7):
        data_list = [datas[i + k][3] for k in range(7)]
        if day_type(datas[i][1].weekday() + 1) == 0:
            dataProcess[0].append(data_list)  # weekday sample
        else:
            dataProcess[1].append(data_list)  # weekend sample
    return dataProcess


def rf_train(datas):
    """Fit a random-forest regressor on 7-column flow samples.

    Each row of `datas` holds six lagged flow features followed by the
    label in the final column. Both features and label are min-max scaled
    before fitting.

    NOTE(review): the fitted scalers are discarded, so the returned model
    predicts in the scaled [0, 1] label space -- confirm that downstream
    prediction code accounts for this.

    Returns:
        The fitted sklearn RandomForestRegressor.
    """
    samples = np.array(datas)
    features = samples[:, 0:6]   # lagged flow values
    labels = samples[:, 6:7]     # target flow
    # Independent min-max scaling of label and feature matrices.
    labels_scaled = MinMaxScaler().fit_transform(labels)
    features_scaled = MinMaxScaler().fit_transform(features)
    regressor = ensemble.RandomForestRegressor(max_features=6)
    regressor.fit(features_scaled, labels_scaled.ravel())
    return regressor


if __name__ == '__main__':
    # Train and persist every station/line model, then report completion.
    savemodel()
    print("完成")
