# -*- coding: utf-8 -*-

import pandas as pd
import time
import numpy as np
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
import sklearn.ensemble as ensemble
import sklearn.utils as su
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import os


def getLevelFiveData_v2(count, startIndex):
    """Generate random level-5 samples.

    Args:
        count (int): number of rows to generate
        startIndex (int): index assigned to the first generated row

    Returns:
        tuple: (data_set, data_max_min) where each data_set row is
        [index, c_19..c_27, level] and data_max_min holds the observed
        [max, min] pair for each generated feature column.
    """
    # (scale, offset) per feature: column = rand(count) * scale + offset.
    # Draw order matches the feature order c_19..c_27 so the RNG stream
    # is consumed exactly as before.
    feature_params = [
        (-8, 8),
        (-1, 1),
        (-40, 120),
        (-20, 20),
        (-120, 800),
        (-0.04, 0.2),
        (-25, 100),
        (-2, 11),
        (-1, 5),
    ]

    columns = []
    data_max_min = []
    for scale, offset in feature_params:
        column = np.random.rand(count) * scale + offset
        columns.append(column)
        data_max_min.append([np.max(column), np.min(column)])

    data_set = []
    for i in range(count):
        row = [startIndex + i]
        row.extend(float(column[i]) for column in columns)
        row.append(5)  # level label
        data_set.append(row)
    return (data_set, data_max_min)


def getLevelFourData_v2(count, startIndex):
    """Generate random level-4 samples.

    Args:
        count (int): number of rows to generate
        startIndex (int): index assigned to the first generated row

    Returns:
        tuple: (data_set, data_max_min) where each data_set row is
        [index, c_19..c_27, level] and data_max_min holds the observed
        [max, min] pair for each generated feature column.
    """
    # (scale, offset) per feature: column = rand(count) * scale + offset.
    # Draw order matches the feature order c_19..c_27 so the RNG stream
    # is consumed exactly as before.
    feature_params = [
        (-8, 16),
        (-3, 4),
        (-20, 140),
        (-20, 40),
        (-140, 680),
        (-0.03, 0.13),
        (-20, 75),
        (-2, 9),
        (-1, 4),
    ]

    columns = []
    data_max_min = []
    for scale, offset in feature_params:
        column = np.random.rand(count) * scale + offset
        columns.append(column)
        data_max_min.append([np.max(column), np.min(column)])

    data_set = []
    for i in range(count):
        row = [startIndex + i]
        row.extend(float(column[i]) for column in columns)
        row.append(4)  # level label
        data_set.append(row)
    return (data_set, data_max_min)


def getLevelThreeData_v2(count, startIndex):
    """Generate random level-3 samples.

    Args:
        count (int): number of rows to generate
        startIndex (int): index assigned to the first generated row

    Returns:
        tuple: (data_set, data_max_min) where each data_set row is
        [index, c_19..c_27, level] and data_max_min holds the observed
        [max, min] pair for each generated feature column.
    """
    # (scale, offset) per feature: column = rand(count) * scale + offset.
    # Draw order matches the feature order c_19..c_27 so the RNG stream
    # is consumed exactly as before.
    feature_params = [
        (-8, 24),
        (-3, 7),
        (-25, 165),
        (-10, 50),
        (-140, 540),
        (-0.03, 0.1),
        (-15, 55),
        (-2, 7),
        (-1, 3),
    ]

    columns = []
    data_max_min = []
    for scale, offset in feature_params:
        column = np.random.rand(count) * scale + offset
        columns.append(column)
        data_max_min.append([np.max(column), np.min(column)])

    data_set = []
    for i in range(count):
        row = [startIndex + i]
        row.extend(float(column[i]) for column in columns)
        row.append(3)  # level label
        data_set.append(row)
    return (data_set, data_max_min)


def getLevelTwoData_v2(count, startIndex):
    """Generate random level-2 samples.

    Args:
        count (int): number of rows to generate
        startIndex (int): index assigned to the first generated row

    Returns:
        tuple: (data_set, data_max_min) where each data_set row is
        [index, c_19..c_27, level] and data_max_min holds the observed
        [max, min] pair for each generated feature column.
    """
    # (scale, offset) per feature: column = rand(count) * scale + offset.
    # Draw order matches the feature order c_19..c_27 so the RNG stream
    # is consumed exactly as before.
    feature_params = [
        (-8, 32),
        (-4, 11),
        (-25, 190),
        (-10, 60),
        (-40, 400),
        (-0.03, 0.07),
        (-20, 40),
        (-2, 5),
        (-1, 2),
    ]

    columns = []
    data_max_min = []
    for scale, offset in feature_params:
        column = np.random.rand(count) * scale + offset
        columns.append(column)
        data_max_min.append([np.max(column), np.min(column)])

    data_set = []
    for i in range(count):
        row = [startIndex + i]
        row.extend(float(column[i]) for column in columns)
        row.append(2)  # level label
        data_set.append(row)
    return (data_set, data_max_min)


def getLevelOneData_v2(count, startIndex):
    """Generate random level-1 samples.

    Args:
        count (int): number of rows to generate
        startIndex (int): index assigned to the first generated row

    Returns:
        tuple: (data_set, data_max_min) where each data_set row is
        [index, c_19..c_27, level] and data_max_min holds the observed
        [max, min] pair for each generated feature column.
    """
    # (scale, offset) per feature: column = rand(count) * scale + offset.
    # Draw order matches the feature order c_19..c_27 so the RNG stream
    # is consumed exactly as before.
    feature_params = [
        (-8, 40),
        (-9, 20),
        (-30, 220),
        (-40, 100),
        (-260, 260),
        (-0.04, 0.04),
        (-20, 20),
        (-3, 3),
        (-1, 1),
    ]

    columns = []
    data_max_min = []
    for scale, offset in feature_params:
        column = np.random.rand(count) * scale + offset
        columns.append(column)
        data_max_min.append([np.max(column), np.min(column)])

    data_set = []
    for i in range(count):
        row = [startIndex + i]
        row.extend(float(column[i]) for column in columns)
        row.append(1)  # level label
        data_set.append(row)
    return (data_set, data_max_min)


# Fixed min-max normalization bounds for the feature columns c_19..c_27.
c_19_max = 400.0
c_19_min = 0.0

c_20_max = 200.0
c_20_min = 0.0

c_21_max = 2200.0
c_21_min = 8.0

c_22_max = 1000.0
c_22_min = 0.0

c_23_max = 8000.0
c_23_min = 0.0

c_24_max = 2.0
c_24_min = 0.0

c_25_max = 1000.0
c_25_min = 0.0

c_26_max = 110.0
c_26_min = 0.0

c_27_max = 50.0
c_27_min = 0.0


def _scale_features(values):
    """Min-max scale the nine raw feature values c_19..c_27.

    The first four features (c_19..c_22) are inverted — a larger raw
    value maps closer to 0 — while the remaining five scale directly.

    Args:
        values: sequence of nine raw feature values, c_19 first.

    Returns:
        list: the nine scaled feature values.
    """
    # (max, min, inverted) per feature, in column order.
    bounds = [
        (c_19_max, c_19_min, True),
        (c_20_max, c_20_min, True),
        (c_21_max, c_21_min, True),
        (c_22_max, c_22_min, True),
        (c_23_max, c_23_min, False),
        (c_24_max, c_24_min, False),
        (c_25_max, c_25_min, False),
        (c_26_max, c_26_min, False),
        (c_27_max, c_27_min, False),
    ]
    scaled = []
    for value, (hi, lo, inverted) in zip(values, bounds):
        span = hi - lo
        scaled.append((hi - value) / span if inverted else (value - lo) / span)
    return scaled


def preHandleData_L26_v2(data):
    """Normalize an 11-element sample row [index, c_19..c_27, level].

    Args:
        data (list): row whose indices 1..9 hold the raw feature values;
            index 0 (row id) and index 10 (level label) pass through.

    Returns:
        list: row of the same shape with the features min-max scaled.
    """
    return [data[0]] + _scale_features(data[1:10]) + [data[10]]


def preHandleData_v2(data):
    """Normalize a bare 9-element feature vector c_19..c_27.

    Args:
        data (list): the nine raw feature values, c_19 first.

    Returns:
        list: the nine min-max scaled feature values.
    """
    return _scale_features(data[0:9])


def get_rfr_param_grid():
    """Return the general grid-search parameter space for the RFR model.

    Returns:
        dict: GridSearchCV ``param_grid`` for a RandomForestRegressor.

    Note:
        The criterion names 'mae'/'mse' were renamed to
        'absolute_error'/'squared_error' in scikit-learn 1.0; keep these
        values only while running on an older scikit-learn.
    """
    # A larger grid (adding max_depth [4, 5] and min_samples_split
    # [8, 12, 16]) was previously built here but never returned; the
    # dead code has been removed.
    return {
        'criterion': ['mae', 'mse'],
        'n_estimators': [800, 1000],
        'max_features': ['sqrt']}


def get_rfr_best_param_grid():
    """Return the best-known (single-combination) RFR grid-search params."""
    return {
        'criterion': ['mae'],
        'n_estimators': [1000],
        'max_features': ['sqrt'],
        'max_depth': [4],
        'min_samples_split': [12],
    }


def get_train_data():
    """Build one batch of normalized training rows: 100 rows per level.

    Returns:
        list: 500 normalized rows ([index, c_19..c_27, level]), levels
        5 down to 1, indices running 1..500.
    """
    # (generator, starting index) per level, highest level first —
    # same generation order as before.
    level_sources = [
        (getLevelFiveData_v2, 1),
        (getLevelFourData_v2, 101),
        (getLevelThreeData_v2, 201),
        (getLevelTwoData_v2, 301),
        (getLevelOneData_v2, 401),
    ]

    data_all = []
    for generator, start_index in level_sources:
        rows, _ = generator(100, start_index)  # per-column max/min unused here
        data_all.extend(preHandleData_L26_v2(row) for row in rows)
    return data_all


def save_model(model):
    """Stub for persisting a trained model; currently a no-op.

    Args:
        model: the trained model that would be saved.

    Returns:
        int: always 0 (nothing is persisted).
    """
    return 0


# Column names matching the generated sample rows:
# [row index, features c_19..c_27, level label].
column_names = ['index', 'c_19', 'c_20', 'c_21', 'c_22',
                'c_23', 'c_24', 'c_25', 'c_26', 'c_27', 'level']

# def train():
#     result = {
#         'id': [],
#         'start': [],
#         'end': [],
#         'start_cross_val': [],
#         'end_cross_val': [],
#         'MSE': [],
#         'MSE_2': [],
#         'RMSE': [],
#         'R2': [],
#         'NMSE': [],
#         'Lv_5': [],
#         'Lv_4': [],
#         'Lv_3': [],
#         'Lv_2': [],
#         'Lv_1': [],
#         'pre_2015': [],
#         'pre_2020': [],
#         'pre_2030': []
#     }

#     # 创建随机森林回归模型
#     rfr = ensemble.RandomForestRegressor()

#     # 创建网格搜索模型
#     rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_best_param_grid(), cv=5)
#     print("创建模型")

#     temp_5 = []
#     temp_4 = []
#     temp_3 = []
#     temp_2 = []

#     for i in range(100):
#         print(str.format("=== 第 {0} 次运行 ===", i))
#         # 创建训练数据
#         data_init = get_train_data()
#         df = pd.DataFrame(data_init, columns=column_names)
#         Y = df['level']
#         X = df.iloc[:, 1:-1]
#         print("初始化数据")

#         # 打乱
#         X, Y = su.shuffle(X, Y ,random_state=7)
#         print("打乱数据")

#         X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=123)
#         # 切分数据

#         start_time = time.localtime()
#         print(str.format("开始训练：{0}", time.asctime(start_time)))
#         # 开始训练
#         rfr_cv.fit(X_train, Y_train)
#         # 训练结束
#         end_time = time.localtime()
#         print(str.format("结束训练：{0}", time.asctime(end_time)))

#         start_cross_val_time = time.localtime()
#         print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
#         score = cross_val_score(rfr_cv, X_test, Y_test, scoring='neg_mean_squared_error', cv=10).mean()
#         end_cross_val_time = time.localtime()
#         print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))

#         # 预测
#         Y_pred = rfr_cv.predict(X_test)

#         # 计算精度
#         MSE = mean_squared_error(Y_test, Y_pred)
#         MSE_2 = np.mean((Y_test - Y_pred)**2)
#         RMSE = np.sqrt(mean_squared_error(Y_test, Y_pred))
#         R2 = r2_score(Y_test, Y_pred)

#         Y_test_mean = np.array([np.mean(Y_test)])
#         NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

#         print("MSE: " + str(MSE))
#         print("MSE_2: " + str(MSE_2))
#         print("RMSE: " + str(RMSE))
#         print("R2: " + str(R2))
#         print("NMSE: " + str(NMSE))

#         # 模拟计算
#         data_limit_5 = [20, 0.95, 80, 5000, 95, 95, 20, 90, 90, 2, 80, 10, 90, 20, 90, 0.7, 95, 80, 90, 90, 95, 90, 80, 90]
#         data_limit_4 = [40, 0.9, 60, 4000, 80, 80, 15, 75, 80, 4, 60, 20, 80, 40, 80, 0.6, 80, 60, 80, 80, 85, 80, 60, 80]
#         data_limit_3 = [60, 0.85, 40, 3000, 60, 60, 10, 60, 70, 6, 40, 40, 70, 60, 70, 0.5, 60, 40, 70, 70, 70, 70, 40, 60]
#         data_limit_2 = [80, 0.8, 20, 2000, 40, 40, 5, 30, 50, 10, 20, 60, 50, 80, 50, 0.4, 40, 20, 40, 40, 40, 50, 20, 30]

#         data_limit_5 = preHandleData_v2(data_limit_5)
#         data_limit_4 = preHandleData_v2(data_limit_4)
#         data_limit_3 = preHandleData_v2(data_limit_3)
#         data_limit_2 = preHandleData_v2(data_limit_2)

#         pre_5 = rfr_cv.predict([data_limit_5])
#         pre_4 = rfr_cv.predict([data_limit_4])
#         pre_3 = rfr_cv.predict([data_limit_3])
#         pre_2 = rfr_cv.predict([data_limit_2])

#         temp_5.append(pre_5)
#         temp_4.append(pre_4)
#         temp_3.append(pre_3)
#         temp_2.append(pre_2)

#         print("Lv.5: >" + str(pre_5[0]))
#         print("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]")
#         print("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]")
#         print("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]")
#         print("Lv.1: <" + str(pre_2[0]))

#         # 计算2015，2020，2030
#         data_2015 = [7.0, 0.964, 51, 4781, 80, 75, 5, 50, 60, 4.5, 80, 30, 100, 60, 80, 0.52, 80, 70, 60, 50, 85, 70, 40, 50]
#         data_2020 = [8.1, 0.946, 49, 4275, 90, 80, 8, 75, 80, 4.8, 85, 35, 100, 42, 95, 0.60, 90, 80, 80, 80, 90, 80, 70, 80]
#         data_2030 = [8.5, 0.913, 47, 4068, 100, 95, 10, 85, 95, 5.5, 90, 40, 100, 30, 100, 0.70, 95, 90, 95, 95, 95, 95, 90, 95]

#         data_2015 = preHandleData_v2(data_2015)
#         data_2020 = preHandleData_v2(data_2020)
#         data_2030 = preHandleData_v2(data_2030)

#         data_predict_2015 = rfr_cv.predict([data_2015])
#         data_predict_2020 = rfr_cv.predict([data_2020])
#         data_predict_2030 = rfr_cv.predict([data_2030])

#         print("2015: " + str(data_predict_2015[0]))
#         print("2020: " + str(data_predict_2020[0]))
#         print("2030: " + str(data_predict_2030[0]))

#         # 保存结果
#         result['id'].append(i)
#         result['start'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_time))
#         result['end'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_time))
#         result['start_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_cross_val_time))
#         result['end_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_cross_val_time))
#         result['MSE'].append(MSE)
#         result['MSE_2'].append(MSE_2)
#         result['RMSE'].append(RMSE)
#         result['R2'].append(R2)
#         result['NMSE'].append(NMSE)
#         result['Lv_5'].append(("Lv.5: >" + str(pre_5[0])))
#         result['Lv_4'].append(("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]"))
#         result['Lv_3'].append(("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]"))
#         result['Lv_2'].append(("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]"))
#         result['Lv_1'].append(("Lv.1: <" + str(pre_2[0])))
#         result['pre_2015'].append(data_predict_2015[0])
#         result['pre_2020'].append(data_predict_2020[0])
#         result['pre_2030'].append(data_predict_2030[0])

#     result['id'].append(0)
#     result['start'].append("")
#     result['end'].append("")
#     result['start_cross_val'].append("")
#     result['end_cross_val'].append("")
#     result['MSE'].append(0.0)
#     result['MSE_2'].append(0.0)
#     result['RMSE'].append(0.0)
#     result['R2'].append(0.0)
#     result['NMSE'].append(0.0)
#     result['Lv_5'].append(("Lv.5: >" + str(np.mean(temp_5))))
#     result['Lv_4'].append(("Lv.4: (" + str(np.mean(temp_4)) + ", " + str(np.mean(temp_5)) + "]"))
#     result['Lv_3'].append(("Lv.3: (" + str(np.mean(temp_3)) + ", " + str(np.mean(temp_4)) + "]"))
#     result['Lv_2'].append(("Lv.2: (" + str(np.mean(temp_2)) + ", " + str(np.mean(temp_3)) + "]"))
#     result['Lv_1'].append(("Lv.1: <=" + str(np.mean(temp_2))))
#     result['pre_2015'].append(np.mean(result['pre_2015']))
#     result['pre_2020'].append(np.mean(result['pre_2020']))
#     result['pre_2030'].append(np.mean(result['pre_2030']))

#     df_result = pd.DataFrame.from_dict(result)
#     df_result.to_csv('result.csv', index=True, header=True, sep=',', encoding='utf-8')

#     # 保存模型
#     joblib.dump(rfr_cv, 'rfr_cv.joblib')

# def train_v2():
#     result = {
#         'id': [],
#         'start': [],
#         'end': [],
#         'start_cross_val': [],
#         'end_cross_val': [],
#         'MSE': [],
#         'MSE_2': [],
#         'RMSE': [],
#         'R2': [],
#         'NMSE': [],
#         'Lv_5': [],
#         'Lv_4': [],
#         'Lv_3': [],
#         'Lv_2': [],
#         'Lv_1': [],
#         'pre_2015': [],
#         'pre_2020': [],
#         'pre_2030': []
#     }

#     temp_5 = []
#     temp_4 = []
#     temp_3 = []
#     temp_2 = []

#     # 模拟计算
#     data_limit_5 = [20, 0.95, 80, 5000, 95, 95, 20, 90, 90, 2, 80, 10, 90, 20, 90, 0.7, 95, 80, 90, 90, 95, 90, 80, 90]
#     data_limit_4 = [40, 0.9, 60, 4000, 80, 80, 15, 75, 80, 4, 60, 20, 80, 40, 80, 0.6, 80, 60, 80, 80, 85, 80, 60, 80]
#     data_limit_3 = [60, 0.85, 40, 3000, 60, 60, 10, 60, 70, 6, 40, 40, 70, 60, 70, 0.5, 60, 40, 70, 70, 70, 70, 40, 60]
#     data_limit_2 = [80, 0.8, 20, 2000, 40, 40, 5, 30, 50, 10, 20, 60, 50, 80, 50, 0.4, 40, 20, 40, 40, 40, 50, 20, 30]

#     data_limit_5 = preHandleData_v2(data_limit_5)
#     data_limit_4 = preHandleData_v2(data_limit_4)
#     data_limit_3 = preHandleData_v2(data_limit_3)
#     data_limit_2 = preHandleData_v2(data_limit_2)

#     data_2015 = [7.0, 0.964, 51, 4781, 80, 75, 5, 50, 60, 4.5, 80, 30, 100, 60, 80, 0.52, 80, 70, 60, 50, 85, 70, 40, 50]
#     data_2020 = [8.1, 0.946, 49, 4275, 90, 80, 8, 75, 80, 4.8, 85, 35, 100, 42, 95, 0.60, 90, 80, 80, 80, 90, 80, 70, 80]
#     data_2030 = [8.5, 0.913, 47, 4068, 100, 95, 10, 85, 95, 5.5, 90, 40, 100, 30, 100, 0.70, 95, 90, 95, 95, 95, 95, 90, 95]

#     data_2015 = preHandleData_v2(data_2015)
#     data_2020 = preHandleData_v2(data_2020)
#     data_2030 = preHandleData_v2(data_2030)


#     model_filename = "rfr_v2.joblib"
#     result_filename = "result_v2.csv"
#     rfr_cv = None
#     if(os.path.exists(model_filename) and os.path.isfile(model_filename)):
#         ReLoadParam = input("Reload Params ? y/n")
#         if(ReLoadParam == "y"):
#             rfr = ensemble.RandomForestRegressor()
#             rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_param_grid(), cv=5)
#             print("创建新的模型, 新参数")
#         else:
#             rfr_cv=joblib.load(model_filename)
#             print("载入已训练的模型")
#     else:
#         rfr = ensemble.RandomForestRegressor()
#         rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_param_grid(), cv=5)
#         print("创建新的模型")

#     # 训练数据
#     for i in range(100):
#         print(str.format("=== 第 {0} 次运行 ===", i))
#         # 创建训练数据
#         data_init = get_train_data()
#         df = pd.DataFrame(data_init, columns=column_names)
#         Y = df['level']
#         X = df.iloc[:, 1:-1]
#         print("初始化数据")

#         # 打乱
#         X, Y = su.shuffle(X, Y ,random_state=7)
#         print("打乱数据")

#         X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=123)

#         start_time = time.localtime()
#         print(str.format("开始训练：{0}", time.asctime(start_time)))
#         # 开始训练
#         rfr_cv.fit(X_train, Y_train)
#         # 训练结束
#         end_time = time.localtime()
#         print(str.format("结束训练：{0}", time.asctime(end_time)))

#         start_cross_val_time = time.localtime()
#         print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
#         score = cross_val_score(rfr_cv, X_test, Y_test, scoring='neg_mean_squared_error', cv=10).mean()
#         end_cross_val_time = time.localtime()
#         print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))

#         # 预测
#         Y_pred = rfr_cv.predict(X_test)

#         # 计算精度
#         MSE = mean_squared_error(Y_test, Y_pred)
#         MSE_2 = np.mean((Y_test - Y_pred)**2)
#         RMSE = np.sqrt(mean_squared_error(Y_test, Y_pred))
#         R2 = r2_score(Y_test, Y_pred)

#         Y_test_mean = np.array([np.mean(Y_test)])
#         NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

#         print("MSE: " + str(MSE))
#         print("MSE_2: " + str(MSE_2))
#         print("RMSE: " + str(RMSE))
#         print("R2: " + str(R2))
#         print("NMSE: " + str(NMSE))

#         print(rfr_cv.best_params_)

#         result['id'].append(i)
#         result['start'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_time))
#         result['end'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_time))
#         result['start_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_cross_val_time))
#         result['end_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_cross_val_time))
#         result['MSE'].append(MSE)
#         result['MSE_2'].append(MSE_2)
#         result['RMSE'].append(RMSE)
#         result['R2'].append(R2)
#         result['NMSE'].append(NMSE)

#         pre_5 = rfr_cv.predict([data_limit_5])
#         pre_4 = rfr_cv.predict([data_limit_4])
#         pre_3 = rfr_cv.predict([data_limit_3])
#         pre_2 = rfr_cv.predict([data_limit_2])

#         temp_5.append(pre_5)
#         temp_4.append(pre_4)
#         temp_3.append(pre_3)
#         temp_2.append(pre_2)

#         print("Lv.5: >" + str(pre_5[0]))
#         print("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]")
#         print("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]")
#         print("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]")
#         print("Lv.1: <" + str(pre_2[0]))

#         data_predict_2015 = rfr_cv.predict([data_2015])
#         data_predict_2020 = rfr_cv.predict([data_2020])
#         data_predict_2030 = rfr_cv.predict([data_2030])

#         print("2015: " + str(data_predict_2015[0]))
#         print("2020: " + str(data_predict_2020[0]))
#         print("2030: " + str(data_predict_2030[0]))

#         result['Lv_5'].append(("Lv.5: >" + str(pre_5[0])))
#         result['Lv_4'].append(("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]"))
#         result['Lv_3'].append(("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]"))
#         result['Lv_2'].append(("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]"))
#         result['Lv_1'].append(("Lv.1: <" + str(pre_2[0])))
#         result['pre_2015'].append(data_predict_2015[0])
#         result['pre_2020'].append(data_predict_2020[0])
#         result['pre_2030'].append(data_predict_2030[0])


#         if(data_predict_2030[0] <= pre_5[0]):
#             continue

#         input_string = input("是否继续？y/n")
#         if(input_string == "n"):
#             break

#     result['id'].append(0)
#     result['start'].append("")
#     result['end'].append("")
#     result['start_cross_val'].append("")
#     result['end_cross_val'].append("")
#     result['MSE'].append(0.0)
#     result['MSE_2'].append(0.0)
#     result['RMSE'].append(0.0)
#     result['R2'].append(0.0)
#     result['NMSE'].append(0.0)
#     result['Lv_5'].append(("Lv.5: >" + str(np.mean(temp_5))))
#     result['Lv_4'].append(("Lv.4: (" + str(np.mean(temp_4)) + ", " + str(np.mean(temp_5)) + "]"))
#     result['Lv_3'].append(("Lv.3: (" + str(np.mean(temp_3)) + ", " + str(np.mean(temp_4)) + "]"))
#     result['Lv_2'].append(("Lv.2: (" + str(np.mean(temp_2)) + ", " + str(np.mean(temp_3)) + "]"))
#     result['Lv_1'].append(("Lv.1: <=" + str(np.mean(temp_2))))
#     result['pre_2015'].append(np.mean(result['pre_2015']))
#     result['pre_2020'].append(np.mean(result['pre_2020']))
#     result['pre_2030'].append(np.mean(result['pre_2030']))

#     df_result = pd.DataFrame.from_dict(result)
#     df_result.to_csv(result_filename, index=True, header=True, sep=',', encoding='utf-8')

#     # 保存模型
#     joblib.dump(rfr_cv, model_filename)

def train_v3(rfr_model):
    """Fit rfr_model on freshly generated level data and report metrics.

    Args:
        rfr_model: an sklearn-style estimator (e.g. a GridSearchCV over a
            RandomForestRegressor) exposing fit/predict.

    Returns:
        The fitted model (the same object that was passed in).
    """
    # 准备数据 (prepare one batch of synthetic training rows)
    data_init = get_train_data()
    df = pd.DataFrame(data_init, columns=column_names)
    Y = df["level"]
    X = df.iloc[:, 1:-1]  # drop the row-index column and the level label

    X, Y = su.shuffle(X, Y, random_state=7)
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.4, random_state=123)

    # 训练 (fit)
    start_time = time.localtime()
    print(str.format("开始训练：{0}", time.asctime(start_time)))
    rfr_model.fit(X_train, Y_train)
    end_time = time.localtime()
    print(str.format("结束训练：{0}", time.asctime(end_time)))

    start_cross_val_time = time.localtime()
    print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
    score = cross_val_score(rfr_model, X_test, Y_test,
                            scoring='neg_mean_squared_error', cv=10).mean()
    end_cross_val_time = time.localtime()
    print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))
    # Bug fix: the cross-validation score was previously computed and
    # then silently discarded; report it alongside the other metrics.
    print("CV neg-MSE: " + str(score))

    Y_pred = rfr_model.predict(X_test)

    # 计算精度 (accuracy metrics on the held-out split)
    MSE = mean_squared_error(Y_test, Y_pred)
    MSE_2 = np.mean((Y_test - Y_pred)**2)  # manual sanity-check of MSE
    RMSE = np.sqrt(MSE)
    R2 = r2_score(Y_test, Y_pred)

    # NMSE: residual sum of squares over total sum of squares.
    NMSE = np.sum((Y_pred - Y_test)**2) / np.sum((Y_test - np.mean(Y_test))**2)

    print("MSE: " + str(MSE))
    print("MSE_2: " + str(MSE_2))
    print("RMSE: " + str(RMSE))
    print("R2: " + str(R2))
    print("NMSE: " + str(NMSE))

    return rfr_model


# Default artifact names for the persisted model and the results CSV.
rfr_file_name = "rfr.joblib"
rfr_result_name = "rfr_result.csv"


def get_model():
    """Interactively obtain a model: load a saved one or build a new one.

    Prompts on stdin. Choice "1" loads a user-named .joblib file if it
    exists; any other path (missing file, choice "2", or invalid input)
    falls back to a fresh GridSearchCV over a RandomForestRegressor
    using the best-known parameter grid.

    Returns:
        The loaded model, or a new (unfitted) GridSearchCV instance.
    """

    def _build_new():
        # Single place defining the "new model" configuration; this was
        # previously duplicated across all three branches.
        return GridSearchCV(estimator=ensemble.RandomForestRegressor(),
                            param_grid=get_rfr_best_param_grid(), cv=5)

    get_mode = input("1.Load exists, 2.Build new: ")
    if get_mode == "1":
        file_name = input("Input saved .joblib name: ")
        if os.path.exists(file_name):
            print("Load exists model")
            return joblib.load(file_name)
        print("Not found!Build new")
        return _build_new()
    if get_mode == "2":
        print("Build new model")
        return _build_new()
    print("Wrong input!Build new model")
    return _build_new()


def predict_data(rfr_model):
    """Predict the ten level-boundary scores and classify every city row.

    Reads ``limit_data.csv`` (rows 0..9 are the (min, max) boundary
    samples for levels 1..5; columns 18..27 hold the raw feature values)
    and ``city_data.csv``, scores each with *rfr_model*, and buckets each
    city row into the first level whose (min, max] interval contains its
    score (0 when no interval matches).

    Args:
        rfr_model: fitted estimator exposing ``predict``.

    Returns:
        tuple: (df_limit, df_city) — df_limit has columns
        ["level", "min", "max"]; df_city has
        ["city", "year", "score", "level"].
    """
    # read_csv already returns a DataFrame; no extra wrapper needed.
    data_limits = pd.read_csv(".\\limit_data.csv", header=0)
    data_limit_array = data_limits.iloc[:, 18:28].values

    # bounds[k] == (lower, upper) predicted score for level k+1.
    # Row 2k is the level's lower boundary sample, row 2k+1 the upper.
    bounds = []
    for level in range(5):
        lo_features = preHandleData_v2(data_limit_array[2 * level])
        hi_features = preHandleData_v2(data_limit_array[2 * level + 1])
        lo = rfr_model.predict([lo_features])[0]
        hi = rfr_model.predict([hi_features])[0]
        bounds.append((lo, hi))

    pre_set = [[lv + 1, lo, hi] for lv, (lo, hi) in enumerate(bounds)]
    df_limit = pd.DataFrame(pre_set, columns=["level", "min", "max"])

    # Echo the boundaries from level 5 down to 1 (level 1 keeps the
    # original "<" prefix, the others "(").
    for lv in range(5, 0, -1):
        lo, hi = bounds[lv - 1]
        bracket = "<" if lv == 1 else "("
        print("Lv." + str(lv) + ": " + bracket +
              str(lo) + ", " + str(hi) + "]")

    # Score each city/year row and assign its level.
    city_data = pd.read_csv("city_data.csv", header=0)

    result_array = []
    for city in city_data.values:
        # Columns 20..28 hold this row's feature values.
        features = preHandleData_v2(city[20:29])
        score = rfr_model.predict([features])[0]
        d_lv = 0
        # First matching (min, max] interval wins, exactly like the
        # original if/elif chain over levels 1..5.
        for lv, (lo, hi) in enumerate(bounds, start=1):
            if lo < score <= hi:
                d_lv = lv
                break
        result_array.append([city[0], city[1], score, d_lv])

    df_city = pd.DataFrame(result_array, columns=[
                           "city", "year", "score", "level"])

    return (df_limit, df_city)


def train_v4(rfr_model):
    """Train *rfr_model* on generated data, evaluate it, and predict
    the level-boundary scores plus a score per city/year row.

    Args:
        rfr_model: estimator exposing ``fit``/``predict`` (e.g. a
            GridSearchCV around RandomForestRegressor).

    Returns:
        tuple: (df_limit, df_city, df_mertic) — the ten boundary scores
        as one row, one (city, year, score) row per city record, and the
        held-out-split metrics.
    """
    # --- prepare data ---
    data_init = get_train_data()
    df = pd.DataFrame(data_init, columns=column_names)
    Y = df["level"]
    X = df.iloc[:, 1:-1]  # drop the leading index column and the target

    X, Y = su.shuffle(X, Y, random_state=7)
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.4, random_state=123)

    # --- train ---
    start_time = time.localtime()
    print(str.format("开始训练：{0}", time.asctime(start_time)))
    rfr_model.fit(X_train, Y_train)
    end_time = time.localtime()
    print(str.format("结束训练：{0}", time.asctime(end_time)))

    # --- evaluate on the held-out split ---
    Y_pred = rfr_model.predict(X_test)

    MSE = mean_squared_error(Y_test, Y_pred)
    MSE_2 = np.mean((Y_test - Y_pred)**2)  # manual cross-check of MSE
    RMSE = np.sqrt(MSE)  # reuse MSE instead of recomputing it
    R2 = r2_score(Y_test, Y_pred)

    Y_test_mean = np.array([np.mean(Y_test)])
    NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

    print("MSE: " + str(MSE))
    print("MSE_2: " + str(MSE_2))
    print("RMSE: " + str(RMSE))
    print("R2: " + str(R2))
    print("NMSE: " + str(NMSE))

    res_column_names = ["MSE", "MSE_2", "RMSE", "R2", "NMSE"]
    df_mertic = pd.DataFrame(
        [[MSE, MSE_2, RMSE, R2, NMSE]], columns=res_column_names)

    # --- predict the ten level-boundary scores ---
    # NOTE(review): unlike predict_data, no column slice (iloc[:, 18:28])
    # is applied here — confirm limit_data.csv's layout matches what
    # preHandleData_v2 expects in this path.
    data_limits = pd.read_csv(".\\limit_data.csv", header=0)
    data_limit_array = data_limits.values

    # Rows 0..9 are the (min, max) boundary samples for levels 1..5.
    limit_scores = [
        rfr_model.predict([preHandleData_v2(data_limit_array[i])])[0]
        for i in range(10)
    ]

    df_limit = pd.DataFrame([limit_scores], columns=[
        "Lv1_min", "Lv1_max", "Lv2_min", "Lv2_max", "Lv3_min",
        "Lv3_max", "Lv4_min", "Lv4_max", "Lv5_min", "Lv5_max"])

    # --- score every city/year row (no level bucketing in v4) ---
    city_data = pd.read_csv("city_data.csv", header=0)

    result_array = []
    for city in city_data.values:
        # Columns 20..28 hold this row's feature values.
        features = preHandleData_v2(city[20:29])
        score = rfr_model.predict([features])[0]
        result_array.append([city[0], city[1], score])

    df_city = pd.DataFrame(result_array, columns=["city", "year", "score"])

    return (df_limit, df_city, df_mertic)


def random_50():
    """Run train_v4 fifty times with one grid-search model, collect the
    per-run limits, city scores, and metrics, append column-wise means,
    and write everything to a user-named Excel file (sheets: "limit",
    "city", "mertics").
    """
    rfr = GridSearchCV(estimator=ensemble.RandomForestRegressor(),
                       param_grid=get_rfr_best_param_grid(), cv=5)

    limit_cols = ["Lv1_min", "Lv1_max", "Lv2_min", "Lv2_max",
                  "Lv3_min", "Lv3_max", "Lv4_min", "Lv4_max",
                  "Lv5_min", "Lv5_max"]
    mertic_cols = ["MSE", "MSE_2", "RMSE", "R2", "NMSE"]

    limit_frames = []
    mertic_frames = []
    df_cities = pd.DataFrame()

    for i in range(50):
        print("=======time: " + str(i) + "==========")
        df_limit, df_city, df_mertic = train_v4(rfr)
        if i == 0:
            # The city/year identity columns are taken from the first run;
            # later runs contribute only their score column.
            df_cities['city'] = df_city['city']
            df_cities['year'] = df_city['year']
        limit_frames.append(df_limit)
        df_cities[i] = df_city['score']
        mertic_frames.append(df_mertic)

    # pd.concat replaces DataFrame.append, which was deprecated in
    # pandas 1.4 and removed in 2.0.
    df_limits = pd.concat(limit_frames, ignore_index=True)
    limits_mean = df_limits[limit_cols].mean(axis=0)
    df_limits = pd.concat(
        [df_limits, limits_mean.to_frame().T], ignore_index=True)

    # Per-run score columns start at position 2 (after city and year).
    temp_cities = df_cities.iloc[:, 2:]
    df_cities["avg"] = temp_cities.mean(axis=1)

    df_mertics = pd.concat(mertic_frames, ignore_index=True)
    mertics_mean = df_mertics[mertic_cols].mean(axis=0)
    df_mertics = pd.concat(
        [df_mertics, mertics_mean.to_frame().T], ignore_index=True)

    res_filename = input("Input filename to save result:")
    # The context manager saves and closes the workbook;
    # ExcelWriter.save() was removed in pandas 2.0.
    with pd.ExcelWriter(res_filename) as writer:
        df_limits.to_excel(writer, sheet_name="limit")
        df_cities.to_excel(writer, sheet_name="city")
        df_mertics.to_excel(writer, sheet_name="mertics")


def main():
    """Interactive entry point.

    Mode "1" runs a step-by-step loop: optionally train (and save) the
    model, predict, and write the results to Excel each round. Any other
    input runs the fixed 50-iteration batch (random_50).
    """
    run_mode = input("1,step by step  2,50 times: ")
    if run_mode != "1":
        random_50()
        return

    rfr = get_model()

    keep_going = True
    while keep_going:
        train_start = input("Train model? y/n:")  # typo "tarin" fixed
        if train_start == "y":
            rfr = train_v3(rfr)
            print(rfr.best_params_)

            # Optionally persist the freshly trained model.
            if input("Save Model? y/n:") == "y":
                model_name = input("Filename? :") + ".joblib"
                joblib.dump(rfr, model_name)
                print("save into " + model_name)

        # Predict and write this round's results.
        p_limit, p_city = predict_data(rfr)

        res_filename = input("Input filename to save result:")
        # Context manager saves and closes the workbook;
        # ExcelWriter.save() was removed in pandas 2.0.
        with pd.ExcelWriter(res_filename) as writer:
            p_limit.to_excel(writer, sheet_name="limit")
            p_city.to_excel(writer, sheet_name="city")

        keep_going = input("Continue to next round? y/n:") == "y"


def main_v2():
    """Alternative entry point: repeat training a user-given number of
    times (mode 1) or do nothing yet (mode 2 is a stub).
    """
    print("Select Run Mode: ")
    print("1: 连续运行")
    print("2: 单次运行")
    run_mode = input()
    if run_mode == "1":
        print("MODE 1")
        # Bug fix: input() returns str, so the original `run_count > 0`
        # comparison raised TypeError, and the counter was never
        # decremented (infinite loop). Convert once and count down.
        run_count = int(input("Input Run Count"))
        rfr = get_model()
        while run_count > 0:
            rfr = train_v3(rfr)
            run_count -= 1
    elif run_mode == "2":
        print("MODE 2")
    else:
        print("Wrong Input")


if __name__ == "__main__":
    # Script entry point: interactive menu (step-by-step or 50-run batch).
    main()
    # train_v2();
    print("Finish!")
