# -*- coding: utf-8 -*-

import pandas as pd
import time
import numpy as np
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
import sklearn.ensemble as ensemble
import sklearn.utils as su
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import os


def getLevelFiveData_v2(count, startIndex):
    """Generate random samples that fall inside the level-5 threshold ranges.

    Args:
        count (int): number of rows to generate
        startIndex (int): id assigned to the first row (ids are sequential)

    Returns:
        tuple: (data_set, data_max_min) where data_set is a list of
            26-element rows [id, c_1..c_24, level=5] and data_max_min is a
            list of 24 [max, min] pairs, one per generated column.
    """
    # (scale, offset) per indicator: column = rand(count) * scale + offset,
    # i.e. uniform values in [offset, offset + scale).
    col_params = [
        (20.0, 0.0),      # c_1
        (8.55, 0.95),     # c_2
        (720, 80),        # c_3
        (45000, 5000),    # c_4
        (855, 95),        # c_5
        (855, 95),        # c_6
        (180, 20),        # c_7
        (810, 90),        # c_8
        (810, 90),        # c_9
        (1.8, 0.2),       # c_10
        (720, 80),        # c_11
        (9, 1),           # c_12
        (810, 90),        # c_13
        (18, 2),          # c_14
        (810, 90),        # c_15
        (6.3, 0.7),       # c_16
        (855, 95),        # c_17
        (720, 80),        # c_18
        (810, 90),        # c_19
        (810, 90),        # c_20
        (855, 95),        # c_21
        (810, 90),        # c_22
        (720, 80),        # c_23
        (810, 90),        # c_24
    ]
    # Draw columns in declaration order so the RNG stream matches the
    # previous unrolled implementation exactly.
    columns = [np.random.rand(count) * scale + offset for scale, offset in col_params]
    data_max_min = [[np.max(col), np.min(col)] for col in columns]
    data_set = [
        [startIndex + i] + [float(col[i]) for col in columns] + [5]
        for i in range(count)
    ]
    return (data_set, data_max_min)

def getLevelFourData_v2(count, startIndex):
    """Generate random samples that fall inside the level-4 threshold ranges.

    Args:
        count (int): number of rows to generate
        startIndex (int): id assigned to the first row (ids are sequential)

    Returns:
        tuple: (data_set, data_max_min) where data_set is a list of
            26-element rows [id, c_1..c_24, level=4] and data_max_min is a
            list of 24 [max, min] pairs, one per generated column.
    """
    # (scale, offset) per indicator: column = rand(count) * scale + offset,
    # i.e. uniform values in [offset, offset + scale).
    col_params = [
        (20, 20),        # c_1
        (0.05, 0.9),     # c_2
        (20, 60),        # c_3
        (1000, 4000),    # c_4
        (15, 80),        # c_5
        (15, 80),        # c_6
        (5, 15),         # c_7
        (15, 75),        # c_8
        (10, 80),        # c_9
        (2, 2),          # c_10
        (20, 60),        # c_11
        (10, 10),        # c_12
        (10, 80),        # c_13
        (20, 20),        # c_14
        (10, 80),        # c_15
        (0.1, 0.6),      # c_16
        (15, 80),        # c_17
        (20, 60),        # c_18
        (10, 80),        # c_19
        (10, 80),        # c_20
        (10, 85),        # c_21
        (10, 80),        # c_22
        (20, 60),        # c_23
        (10, 80),        # c_24
    ]
    # Draw columns in declaration order so the RNG stream matches the
    # previous unrolled implementation exactly.
    columns = [np.random.rand(count) * scale + offset for scale, offset in col_params]
    data_max_min = [[np.max(col), np.min(col)] for col in columns]
    data_set = [
        [startIndex + i] + [float(col[i]) for col in columns] + [4]
        for i in range(count)
    ]
    return (data_set, data_max_min)

def getLevelThreeData_v2(count, startIndex):
    """Generate random samples that fall inside the level-3 threshold ranges.

    Args:
        count (int): number of rows to generate
        startIndex (int): id assigned to the first row (ids are sequential)

    Returns:
        tuple: (data_set, data_max_min) where data_set is a list of
            26-element rows [id, c_1..c_24, level=3] and data_max_min is a
            list of 24 [max, min] pairs, one per generated column.
    """
    # (scale, offset) per indicator: column = rand(count) * scale + offset,
    # i.e. uniform values in [offset, offset + scale).
    col_params = [
        (20, 40),        # c_1
        (0.05, 0.85),    # c_2
        (20, 40),        # c_3
        (1000, 3000),    # c_4
        (20, 60),        # c_5
        (20, 60),        # c_6
        (5, 10),         # c_7
        (15, 60),        # c_8
        (10, 70),        # c_9
        (2, 4),          # c_10
        (20, 40),        # c_11
        (20, 20),        # c_12
        (10, 70),        # c_13
        (20, 40),        # c_14
        (10, 70),        # c_15
        (0.1, 0.5),      # c_16
        (20, 60),        # c_17
        (20, 40),        # c_18
        (10, 70),        # c_19
        (10, 70),        # c_20
        (15, 70),        # c_21
        (10, 70),        # c_22
        (20, 40),        # c_23
        (20, 60),        # c_24
    ]
    # Draw columns in declaration order so the RNG stream matches the
    # previous unrolled implementation exactly.
    columns = [np.random.rand(count) * scale + offset for scale, offset in col_params]
    data_max_min = [[np.max(col), np.min(col)] for col in columns]
    data_set = [
        [startIndex + i] + [float(col[i]) for col in columns] + [3]
        for i in range(count)
    ]
    return (data_set, data_max_min)

def getLevelTwoData_v2(count, startIndex):
    """Generate random samples that fall inside the level-2 threshold ranges.

    Args:
        count (int): number of rows to generate
        startIndex (int): id assigned to the first row (ids are sequential)

    Returns:
        tuple: (data_set, data_max_min) where data_set is a list of
            26-element rows [id, c_1..c_24, level=2] and data_max_min is a
            list of 24 [max, min] pairs, one per generated column.
    """
    # (scale, offset) per indicator: column = rand(count) * scale + offset,
    # i.e. uniform values in [offset, offset + scale).
    col_params = [
        (20, 60),        # c_1
        (0.05, 0.8),     # c_2
        (20, 20),        # c_3
        (1000, 2000),    # c_4
        (20, 40),        # c_5
        (20, 40),        # c_6
        (5, 5),          # c_7
        (30, 30),        # c_8
        (20, 50),        # c_9
        (4, 6),          # c_10
        (20, 20),        # c_11
        (20, 40),        # c_12
        (20, 50),        # c_13
        (20, 60),        # c_14
        (20, 50),        # c_15
        (0.1, 0.4),      # c_16
        (20, 40),        # c_17
        (20, 20),        # c_18
        (30, 40),        # c_19
        (30, 40),        # c_20
        (30, 40),        # c_21
        (20, 50),        # c_22
        (20, 20),        # c_23
        (30, 30),        # c_24
    ]
    # Draw columns in declaration order so the RNG stream matches the
    # previous unrolled implementation exactly.
    columns = [np.random.rand(count) * scale + offset for scale, offset in col_params]
    data_max_min = [[np.max(col), np.min(col)] for col in columns]
    data_set = [
        [startIndex + i] + [float(col[i]) for col in columns] + [2]
        for i in range(count)
    ]
    return (data_set, data_max_min)

def getLevelOneData_v2(count, startIndex):
    """Generate random samples that fall inside the level-1 threshold ranges.

    Args:
        count (int): number of rows to generate
        startIndex (int): id assigned to the first row (ids are sequential)

    Returns:
        tuple: (data_set, data_max_min) where data_set is a list of
            26-element rows [id, c_1..c_24, level=1] and data_max_min is a
            list of 24 [max, min] pairs, one per generated column.
    """
    # (scale, offset) per indicator: column = rand(count) * scale + offset,
    # i.e. uniform values in [offset, offset + scale).
    col_params = [
        (720, 80),       # c_1
        (0.72, 0.08),    # c_2
        (18, 2),         # c_3
        (1800, 200),     # c_4
        (36, 4),         # c_5
        (36, 4),         # c_6
        (4.5, 0.5),      # c_7
        (27, 3),         # c_8
        (45, 5),         # c_9
        (90, 10),        # c_10
        (18, 2),         # c_11
        (540, 60),       # c_12
        (45, 5),         # c_13
        (720, 80),       # c_14
        (45, 5),         # c_15
        (0.36, 0.04),    # c_16
        (36, 4),         # c_17
        (18, 2),         # c_18
        (36, 4),         # c_19
        (36, 4),         # c_20
        (36, 4),         # c_21
        (45, 5),         # c_22
        (18, 2),         # c_23
        (27, 3),         # c_24
    ]
    # Draw columns in declaration order so the RNG stream matches the
    # previous unrolled implementation exactly.
    columns = [np.random.rand(count) * scale + offset for scale, offset in col_params]
    data_max_min = [[np.max(col), np.min(col)] for col in columns]
    data_set = [
        [startIndex + i] + [float(col[i]) for col in columns] + [1]
        for i in range(count)
    ]
    return (data_set, data_max_min)

# Global min/max bounds for each of the 24 indicators (c_1..c_24), used by
# preHandleData_L26_v2 / preHandleData_v2 for min-max normalization:
# normalized = (value - c_k_min) / (c_k_max - c_k_min).
# NOTE(review): these bounds are wider than the per-level generation ranges
# above — presumably they cover all five levels combined; verify.
c_1_max = 800.0
c_1_min = 2.0

c_2_max = 9.5
c_2_min = 0.08

c_3_max = 800.0
c_3_min = 0.2

c_4_max = 50000.0
c_4_min = 200.0

c_5_max = 950.0
c_5_min = 4.0

c_6_max = 950.0
c_6_min = 4.0

c_7_max = 200.0
c_7_min = 0.5

c_8_max = 900.0
c_8_min = 3.0

c_9_max = 900.0
c_9_min = 5.0

c_10_max = 100.0
c_10_min = 0.2

c_11_max = 800.0
c_11_min = 2.0

c_12_max = 600.0
c_12_min = 1.0

c_13_max = 900.0
c_13_min = 5.0

c_14_max = 800.0
c_14_min = 2.0

c_15_max = 900.0
c_15_min = 5.0

c_16_max = 7.0
c_16_min = 0.04

c_17_max = 950.0
c_17_min = 4.0

c_18_max = 800.0
c_18_min = 2.0

c_19_max = 900.0
c_19_min = 4.0

c_20_max = 900.0
c_20_min = 4.0

c_21_max = 950.0
c_21_min = 4.0

c_22_max = 900.0
c_22_min = 5.0

c_23_max = 800.0
c_23_min = 2.0

c_24_max = 900.0
c_24_min = 3.0

def preHandleData_L26_v2(data):
    """Min-max normalize a 26-element record (id, c_1..c_24, level).

    Args:
        data (array): record whose positions 1..24 hold the raw indicator
            values; position 0 (the id) and position 25 (the level label)
            are passed through unchanged.

    Returns:
        array: a new 26-element record with positions 1..24 normalized
            against the module-level c_k_max / c_k_min bounds.
    """
    bounds = [
        (c_1_max, c_1_min), (c_2_max, c_2_min), (c_3_max, c_3_min),
        (c_4_max, c_4_min), (c_5_max, c_5_min), (c_6_max, c_6_min),
        (c_7_max, c_7_min), (c_8_max, c_8_min), (c_9_max, c_9_min),
        (c_10_max, c_10_min), (c_11_max, c_11_min), (c_12_max, c_12_min),
        (c_13_max, c_13_min), (c_14_max, c_14_min), (c_15_max, c_15_min),
        (c_16_max, c_16_min), (c_17_max, c_17_min), (c_18_max, c_18_min),
        (c_19_max, c_19_min), (c_20_max, c_20_min), (c_21_max, c_21_min),
        (c_22_max, c_22_min), (c_23_max, c_23_min), (c_24_max, c_24_min),
    ]
    # Positions treated as negative-direction indicators: the reciprocal
    # (scaled by 100) is normalized instead of the raw value.
    inverse_positions = {1, 10, 12, 14}
    out = [data[0]]
    for pos, (hi, lo) in enumerate(bounds, start=1):
        if pos in inverse_positions:
            # NOTE(review): unlike preHandleData_v2, the lower bound is not
            # subtracted before dividing here — confirm which form is intended.
            out.append(((1 / data[pos]) * 100) / (hi - lo))
        else:
            out.append((data[pos] - lo) / (hi - lo))
    out.append(data[25])
    return out

def preHandleData_v2(data):
    """Min-max normalize a 24-element indicator vector (c_1..c_24).

    Args:
        data (array): 24 raw indicator values, one per c_k column.

    Returns:
        array: a new 24-element vector normalized against the module-level
            c_k_max / c_k_min bounds.
    """
    bounds = [
        (c_1_max, c_1_min), (c_2_max, c_2_min), (c_3_max, c_3_min),
        (c_4_max, c_4_min), (c_5_max, c_5_min), (c_6_max, c_6_min),
        (c_7_max, c_7_min), (c_8_max, c_8_min), (c_9_max, c_9_min),
        (c_10_max, c_10_min), (c_11_max, c_11_min), (c_12_max, c_12_min),
        (c_13_max, c_13_min), (c_14_max, c_14_min), (c_15_max, c_15_min),
        (c_16_max, c_16_min), (c_17_max, c_17_min), (c_18_max, c_18_min),
        (c_19_max, c_19_min), (c_20_max, c_20_min), (c_21_max, c_21_min),
        (c_22_max, c_22_min), (c_23_max, c_23_min), (c_24_max, c_24_min),
    ]
    # Indices treated as negative-direction indicators: the reciprocal
    # (scaled by 100) is fed into the min-max formula instead of the raw value.
    inverse_indices = {0, 9, 11, 13}
    result = []
    for i, (hi, lo) in enumerate(bounds):
        value = (1 / data[i]) * 100 if i in inverse_indices else data[i]
        result.append((value - lo) / (hi - lo))
    return result

def get_rfr_param_grid():
    """Return the general grid-search parameter space for the RF regressor.

    Returns:
        dict: parameter grid for GridSearchCV. NOTE(review): 'mae'/'mse'
            are the legacy (pre-1.0) scikit-learn criterion spellings —
            newer versions use 'absolute_error'/'squared_error'; confirm
            against the pinned sklearn version before changing.
    """
    # The previous version also built a larger grid (with max_depth and
    # min_samples_split) that was never returned; the dead local was removed.
    return {
        'criterion': ['mae', 'mse'],
        'n_estimators': [800, 1000],
        'max_features': ['sqrt'],
    }

def get_rfr_best_param_grid():
    """Return the fixed best-known hyper-parameter grid for the RF regressor.

    Each value is a single-element list so the grid can still be fed to
    GridSearchCV unchanged.
    """
    best_grid = {
        'criterion': ['mae'],
        'n_estimators': [1000],
        'max_features': ['sqrt'],
        'max_depth': [4],
        'min_samples_split': [12],
    }
    return best_grid

def get_train_data():
    """Generate one normalized random training set across all five levels.

    Draws 100 rows per level (ids 1..500) and runs each row through
    preHandleData_L26_v2. The per-level max/min summaries returned by the
    generators are intentionally discarded.

    Returns:
        list: 500 normalized 26-element rows ([id, c_1..c_24, level]).
    """
    # (generator, startIndex) per level, in the same order as before so the
    # RNG consumption and resulting ids are unchanged.
    level_sources = [
        (getLevelFiveData_v2, 1),
        (getLevelFourData_v2, 101),
        (getLevelThreeData_v2, 201),
        (getLevelTwoData_v2, 301),
        (getLevelOneData_v2, 401),
    ]
    data_all = []
    for generator, start_index in level_sources:
        rows, _ = generator(100, start_index)
        data_all.extend(preHandleData_L26_v2(row) for row in rows)
    return data_all



def get_model():
    """Placeholder for loading a persisted model; not implemented yet.

    Returns:
        int: always 0 (stub value).
    """
    return 0

def save_model(model):
    """Placeholder for persisting a trained model; not implemented yet.

    Args:
        model: the model object to persist (currently ignored).

    Returns:
        int: always 0 (stub value).
    """
    return 0

# Column names for the 26-element rows produced by the getLevel*Data_v2
# generators: the row id, the 24 indicator columns c_1..c_24, and the level label.
column_names = ['index', 'c_1', 'c_2', 'c_3', 'c_4', 'c_5', 'c_6', 'c_7', 'c_8', 'c_9', 'c_10', 'c_11', 'c_12', 'c_13', 'c_14', 'c_15', 'c_16', 'c_17', 'c_18', 'c_19', 'c_20', 'c_21', 'c_22', 'c_23', 'c_24', 'level']

def train():
    """Run 100 train/evaluate rounds of a grid-searched random-forest
    regressor on freshly generated random samples.

    Each round records timing, error metrics (MSE/RMSE/R2/NMSE) and the
    predicted score at each quality-level threshold; a final summary row
    averages the threshold predictions and year predictions.

    Side effects: writes 'result.csv' and 'rfr_cv.joblib' to the working
    directory.
    """
    # Per-run report accumulator; each key becomes one CSV column.
    result = {
        'id': [],
        'start': [],
        'end': [],
        'start_cross_val': [],
        'end_cross_val': [],
        'MSE': [],
        'MSE_2': [],
        'RMSE': [],
        'R2': [],
        'NMSE': [],
        'Lv_5': [],
        'Lv_4': [],
        'Lv_3': [],
        'Lv_2': [],
        'Lv_1': [],
        'pre_2015': [],
        'pre_2020': [],
        'pre_2030': []
    }

    # Create the random-forest regression model
    rfr = ensemble.RandomForestRegressor()

    # Wrap it in a grid search (5-fold CV) over the fixed "best" params
    rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_best_param_grid(), cv=5)
    print("创建模型")

    # Per-run predictions at each level threshold; averaged at the end.
    temp_5 = []
    temp_4 = []
    temp_3 = []
    temp_2 = []

    for i in range(100):
        print(str.format("=== 第 {0} 次运行 ===", i))
        # Generate fresh random training data for this run
        data_init = get_train_data()
        df = pd.DataFrame(data_init, columns=column_names)
        Y = df['level']
        X = df.iloc[:, 1:-1]  # drop 'index' (first) and 'level' (last) columns
        print("初始化数据")

        # Shuffle (fixed seed, so the shuffle is reproducible per run)
        X, Y = su.shuffle(X, Y ,random_state=7)
        print("打乱数据")

        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=123)
        # Split the data

        start_time = time.localtime()
        print(str.format("开始训练：{0}", time.asctime(start_time)))
        # Start training
        rfr_cv.fit(X_train, Y_train)
        # Training finished
        end_time = time.localtime()
        print(str.format("结束训练：{0}", time.asctime(end_time)))

        start_cross_val_time = time.localtime()
        print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
        # NOTE(review): `score` (mean negated MSE) is computed but never used.
        score = cross_val_score(rfr_cv, X_test, Y_test, scoring='neg_mean_squared_error', cv=10).mean()
        end_cross_val_time = time.localtime()
        print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))

        # Predict on the held-out split
        Y_pred = rfr_cv.predict(X_test)

        # Accuracy metrics (MSE_2 recomputes MSE by hand as a sanity check)
        MSE = mean_squared_error(Y_test, Y_pred)
        MSE_2 = np.mean((Y_test - Y_pred)**2)
        RMSE = np.sqrt(mean_squared_error(Y_test, Y_pred))
        R2 = r2_score(Y_test, Y_pred)

        # Normalized MSE: residual sum of squares over total sum of squares
        Y_test_mean = np.array([np.mean(Y_test)])
        NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

        print("MSE: " + str(MSE))
        print("MSE_2: " + str(MSE_2))
        print("RMSE: " + str(RMSE))
        print("R2: " + str(R2))
        print("NMSE: " + str(NMSE))

        # Simulated evaluation: 24-feature vectors at each level's threshold
        data_limit_5 = [20, 0.95, 80, 5000, 95, 95, 20, 90, 90, 2, 80, 10, 90, 20, 90, 0.7, 95, 80, 90, 90, 95, 90, 80, 90]
        data_limit_4 = [40, 0.9, 60, 4000, 80, 80, 15, 75, 80, 4, 60, 20, 80, 40, 80, 0.6, 80, 60, 80, 80, 85, 80, 60, 80]
        data_limit_3 = [60, 0.85, 40, 3000, 60, 60, 10, 60, 70, 6, 40, 40, 70, 60, 70, 0.5, 60, 40, 70, 70, 70, 70, 40, 60]
        data_limit_2 = [80, 0.8, 20, 2000, 40, 40, 5, 30, 50, 10, 20, 60, 50, 80, 50, 0.4, 40, 20, 40, 40, 40, 50, 20, 30]

        data_limit_5 = preHandleData_v2(data_limit_5)
        data_limit_4 = preHandleData_v2(data_limit_4)
        data_limit_3 = preHandleData_v2(data_limit_3)
        data_limit_2 = preHandleData_v2(data_limit_2)

        pre_5 = rfr_cv.predict([data_limit_5])
        pre_4 = rfr_cv.predict([data_limit_4])
        pre_3 = rfr_cv.predict([data_limit_3])
        pre_2 = rfr_cv.predict([data_limit_2])

        temp_5.append(pre_5)
        temp_4.append(pre_4)
        temp_3.append(pre_3)
        temp_2.append(pre_2)

        print("Lv.5: >" + str(pre_5[0]))
        print("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]")
        print("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]")
        print("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]")
        print("Lv.1: <" + str(pre_2[0]))

        # Predict scores for the 2015, 2020 and 2030 feature vectors
        data_2015 = [7.0, 0.964, 51, 4781, 80, 75, 5, 50, 60, 4.5, 80, 30, 100, 60, 80, 0.52, 80, 70, 60, 50, 85, 70, 40, 50]
        data_2020 = [8.1, 0.946, 49, 4275, 90, 80, 8, 75, 80, 4.8, 85, 35, 100, 42, 95, 0.60, 90, 80, 80, 80, 90, 80, 70, 80]
        data_2030 = [8.5, 0.913, 47, 4068, 100, 95, 10, 85, 95, 5.5, 90, 40, 100, 30, 100, 0.70, 95, 90, 95, 95, 95, 95, 90, 95]

        data_2015 = preHandleData_v2(data_2015)
        data_2020 = preHandleData_v2(data_2020)
        data_2030 = preHandleData_v2(data_2030)

        data_predict_2015 = rfr_cv.predict([data_2015])
        data_predict_2020 = rfr_cv.predict([data_2020])
        data_predict_2030 = rfr_cv.predict([data_2030])

        print("2015: " + str(data_predict_2015[0]))
        print("2020: " + str(data_predict_2020[0]))
        print("2030: " + str(data_predict_2030[0]))

        # Record this run's results
        result['id'].append(i)
        result['start'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_time))
        result['end'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_time))
        result['start_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_cross_val_time))
        result['end_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_cross_val_time))
        result['MSE'].append(MSE)
        result['MSE_2'].append(MSE_2)
        result['RMSE'].append(RMSE)
        result['R2'].append(R2)
        result['NMSE'].append(NMSE)
        result['Lv_5'].append(("Lv.5: >" + str(pre_5[0])))
        result['Lv_4'].append(("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]"))
        result['Lv_3'].append(("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]"))
        result['Lv_2'].append(("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]"))
        result['Lv_1'].append(("Lv.1: <" + str(pre_2[0])))
        result['pre_2015'].append(data_predict_2015[0])
        result['pre_2020'].append(data_predict_2020[0])
        result['pre_2030'].append(data_predict_2030[0])

    # Summary row (id 0): averaged thresholds / year predictions, zeroed metrics
    result['id'].append(0)
    result['start'].append("")
    result['end'].append("")
    result['start_cross_val'].append("")
    result['end_cross_val'].append("")
    result['MSE'].append(0.0)
    result['MSE_2'].append(0.0)
    result['RMSE'].append(0.0)
    result['R2'].append(0.0)
    result['NMSE'].append(0.0)
    result['Lv_5'].append(("Lv.5: >" + str(np.mean(temp_5))))
    result['Lv_4'].append(("Lv.4: (" + str(np.mean(temp_4)) + ", " + str(np.mean(temp_5)) + "]"))
    result['Lv_3'].append(("Lv.3: (" + str(np.mean(temp_3)) + ", " + str(np.mean(temp_4)) + "]"))
    result['Lv_2'].append(("Lv.2: (" + str(np.mean(temp_2)) + ", " + str(np.mean(temp_3)) + "]"))
    result['Lv_1'].append(("Lv.1: <=" + str(np.mean(temp_2))))
    result['pre_2015'].append(np.mean(result['pre_2015']))
    result['pre_2020'].append(np.mean(result['pre_2020']))
    result['pre_2030'].append(np.mean(result['pre_2030']))

    df_result = pd.DataFrame.from_dict(result)
    df_result.to_csv('result.csv', index=True, header=True, sep=',', encoding='utf-8')

    # Persist the trained model
    joblib.dump(rfr_cv, 'rfr_cv.joblib')

def train_v2():
    """Interactive variant of train(): optionally resumes from a saved
    model, trains up to 100 rounds on fresh random samples, and asks the
    user whether to continue whenever the 2030 prediction exceeds the
    level-5 threshold prediction.

    Side effects: may prompt on stdin; writes 'result_v2.csv' and
    'rfr_v2.joblib' to the working directory.
    """
    # Per-run report accumulator; each key becomes one CSV column.
    result = {
        'id': [],
        'start': [],
        'end': [],
        'start_cross_val': [],
        'end_cross_val': [],
        'MSE': [],
        'MSE_2': [],
        'RMSE': [],
        'R2': [],
        'NMSE': [],
        'Lv_5': [],
        'Lv_4': [],
        'Lv_3': [],
        'Lv_2': [],
        'Lv_1': [],
        'pre_2015': [],
        'pre_2020': [],
        'pre_2030': []
    }

    # Per-run predictions at each level threshold; averaged at the end.
    temp_5 = []
    temp_4 = []
    temp_3 = []
    temp_2 = []

    # Simulated evaluation: 24-feature vectors at each level's threshold
    data_limit_5 = [20, 0.95, 80, 5000, 95, 95, 20, 90, 90, 2, 80, 10, 90, 20, 90, 0.7, 95, 80, 90, 90, 95, 90, 80, 90]
    data_limit_4 = [40, 0.9, 60, 4000, 80, 80, 15, 75, 80, 4, 60, 20, 80, 40, 80, 0.6, 80, 60, 80, 80, 85, 80, 60, 80]
    data_limit_3 = [60, 0.85, 40, 3000, 60, 60, 10, 60, 70, 6, 40, 40, 70, 60, 70, 0.5, 60, 40, 70, 70, 70, 70, 40, 60]
    data_limit_2 = [80, 0.8, 20, 2000, 40, 40, 5, 30, 50, 10, 20, 60, 50, 80, 50, 0.4, 40, 20, 40, 40, 40, 50, 20, 30]

    data_limit_5 = preHandleData_v2(data_limit_5)
    data_limit_4 = preHandleData_v2(data_limit_4)
    data_limit_3 = preHandleData_v2(data_limit_3)
    data_limit_2 = preHandleData_v2(data_limit_2)

    # Year feature vectors (2015 / 2020 / 2030) for score prediction
    data_2015 = [7.0, 0.964, 51, 4781, 80, 75, 5, 50, 60, 4.5, 80, 30, 100, 60, 80, 0.52, 80, 70, 60, 50, 85, 70, 40, 50]
    data_2020 = [8.1, 0.946, 49, 4275, 90, 80, 8, 75, 80, 4.8, 85, 35, 100, 42, 95, 0.60, 90, 80, 80, 80, 90, 80, 70, 80]
    data_2030 = [8.5, 0.913, 47, 4068, 100, 95, 10, 85, 95, 5.5, 90, 40, 100, 30, 100, 0.70, 95, 90, 95, 95, 95, 95, 90, 95]

    data_2015 = preHandleData_v2(data_2015)
    data_2020 = preHandleData_v2(data_2020)
    data_2030 = preHandleData_v2(data_2030)


    model_filename = "rfr_v2.joblib"
    result_filename = "result_v2.csv"
    # Either load a previously saved model or build a new grid search,
    # depending on whether the file exists and on the user's answer.
    rfr_cv = None
    if(os.path.exists(model_filename) and os.path.isfile(model_filename)):
        ReLoadParam = input("Reload Params ? y/n")
        if(ReLoadParam == "y"):
            rfr = ensemble.RandomForestRegressor()
            rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_param_grid(), cv=5)
            print("创建新的模型, 新参数")
        else:
            rfr_cv=joblib.load(model_filename)
            print("载入已训练的模型")
    else:
        rfr = ensemble.RandomForestRegressor()
        rfr_cv = GridSearchCV(estimator=rfr, param_grid=get_rfr_param_grid(), cv=5)
        print("创建新的模型")

    # Training rounds
    for i in range(100):
        print(str.format("=== 第 {0} 次运行 ===", i))
        # Generate fresh random training data for this run
        data_init = get_train_data()
        df = pd.DataFrame(data_init, columns=column_names)
        Y = df['level']
        X = df.iloc[:, 1:-1]  # drop 'index' (first) and 'level' (last) columns
        print("初始化数据")

        # Shuffle (fixed seed, so the shuffle is reproducible per run)
        X, Y = su.shuffle(X, Y ,random_state=7)
        print("打乱数据")

        X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=123)

        start_time = time.localtime()
        print(str.format("开始训练：{0}", time.asctime(start_time)))
        # Start training
        rfr_cv.fit(X_train, Y_train)
        # Training finished
        end_time = time.localtime()
        print(str.format("结束训练：{0}", time.asctime(end_time)))

        start_cross_val_time = time.localtime()
        print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
        # NOTE(review): `score` (mean negated MSE) is computed but never used.
        score = cross_val_score(rfr_cv, X_test, Y_test, scoring='neg_mean_squared_error', cv=10).mean()
        end_cross_val_time = time.localtime()
        print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))

        # Predict on the held-out split
        Y_pred = rfr_cv.predict(X_test)

        # Accuracy metrics (MSE_2 recomputes MSE by hand as a sanity check)
        MSE = mean_squared_error(Y_test, Y_pred)
        MSE_2 = np.mean((Y_test - Y_pred)**2)
        RMSE = np.sqrt(mean_squared_error(Y_test, Y_pred))
        R2 = r2_score(Y_test, Y_pred)

        # Normalized MSE: residual sum of squares over total sum of squares
        Y_test_mean = np.array([np.mean(Y_test)])
        NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

        print("MSE: " + str(MSE))
        print("MSE_2: " + str(MSE_2))
        print("RMSE: " + str(RMSE))
        print("R2: " + str(R2))
        print("NMSE: " + str(NMSE))

        print(rfr_cv.best_params_)

        result['id'].append(i)
        result['start'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_time))
        result['end'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_time))
        result['start_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", start_cross_val_time))
        result['end_cross_val'].append(time.strftime("%Y-%m-%d %H:%M:%S", end_cross_val_time))
        result['MSE'].append(MSE)
        result['MSE_2'].append(MSE_2)
        result['RMSE'].append(RMSE)
        result['R2'].append(R2)
        result['NMSE'].append(NMSE)

        pre_5 = rfr_cv.predict([data_limit_5])
        pre_4 = rfr_cv.predict([data_limit_4])
        pre_3 = rfr_cv.predict([data_limit_3])
        pre_2 = rfr_cv.predict([data_limit_2])

        temp_5.append(pre_5)
        temp_4.append(pre_4)
        temp_3.append(pre_3)
        temp_2.append(pre_2)

        print("Lv.5: >" + str(pre_5[0]))
        print("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]")
        print("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]")
        print("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]")
        print("Lv.1: <" + str(pre_2[0]))

        data_predict_2015 = rfr_cv.predict([data_2015])
        data_predict_2020 = rfr_cv.predict([data_2020])
        data_predict_2030 = rfr_cv.predict([data_2030])

        print("2015: " + str(data_predict_2015[0]))
        print("2020: " + str(data_predict_2020[0]))
        print("2030: " + str(data_predict_2030[0]))

        result['Lv_5'].append(("Lv.5: >" + str(pre_5[0])))
        result['Lv_4'].append(("Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]"))
        result['Lv_3'].append(("Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]"))
        result['Lv_2'].append(("Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]"))
        result['Lv_1'].append(("Lv.1: <" + str(pre_2[0])))
        result['pre_2015'].append(data_predict_2015[0])
        result['pre_2020'].append(data_predict_2020[0])
        result['pre_2030'].append(data_predict_2030[0])


        # Keep iterating automatically while the 2030 prediction stays at or
        # below the level-5 threshold; otherwise ask the user whether to stop.
        if(data_predict_2030[0] <= pre_5[0]):
            continue

        input_string = input("是否继续？y/n")
        if(input_string == "n"):
            break

    # Summary row (id 0): averaged thresholds / year predictions, zeroed metrics
    result['id'].append(0)
    result['start'].append("")
    result['end'].append("")
    result['start_cross_val'].append("")
    result['end_cross_val'].append("")
    result['MSE'].append(0.0)
    result['MSE_2'].append(0.0)
    result['RMSE'].append(0.0)
    result['R2'].append(0.0)
    result['NMSE'].append(0.0)
    result['Lv_5'].append(("Lv.5: >" + str(np.mean(temp_5))))
    result['Lv_4'].append(("Lv.4: (" + str(np.mean(temp_4)) + ", " + str(np.mean(temp_5)) + "]"))
    result['Lv_3'].append(("Lv.3: (" + str(np.mean(temp_3)) + ", " + str(np.mean(temp_4)) + "]"))
    result['Lv_2'].append(("Lv.2: (" + str(np.mean(temp_2)) + ", " + str(np.mean(temp_3)) + "]"))
    result['Lv_1'].append(("Lv.1: <=" + str(np.mean(temp_2))))
    result['pre_2015'].append(np.mean(result['pre_2015']))
    result['pre_2020'].append(np.mean(result['pre_2020']))
    result['pre_2030'].append(np.mean(result['pre_2030']))

    df_result = pd.DataFrame.from_dict(result)
    df_result.to_csv(result_filename, index=True, header=True, sep=',', encoding='utf-8')

    # Persist the trained model
    joblib.dump(rfr_cv, model_filename)

def train_v3(rfr_model):
    """Train ``rfr_model`` on one freshly generated random sample set and
    print evaluation metrics.

    Args:
        rfr_model: a scikit-learn regressor (typically a GridSearchCV over
            RandomForestRegressor); fitted in place.

    Returns:
        The fitted model (the same object that was passed in).
    """
    # Prepare data
    data_init = get_train_data()
    df = pd.DataFrame(data_init, columns=column_names)
    Y = df["level"]
    X = df.iloc[:, 1:-1]  # drop 'index' (first) and 'level' (last) columns

    X, Y = su.shuffle(X, Y, random_state=7)
    X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.4, random_state=123)

    # Train
    start_time = time.localtime()
    print(str.format("开始训练：{0}", time.asctime(start_time)))
    rfr_model.fit(X_train, Y_train)
    end_time = time.localtime()
    print(str.format("结束训练：{0}", time.asctime(end_time)))

    # Cross-validate on the held-out split (scoring is negated MSE)
    start_cross_val_time = time.localtime()
    print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
    score = cross_val_score(rfr_model, X_test, Y_test, scoring='neg_mean_squared_error', cv=10).mean()
    end_cross_val_time = time.localtime()
    print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))
    # Fix: `score` was previously computed but never reported.
    print("cross_val neg_mean_squared_error: " + str(score))

    Y_pred = rfr_model.predict(X_test)

    # Accuracy metrics (MSE_2 recomputes MSE by hand as a sanity check)
    MSE = mean_squared_error(Y_test, Y_pred)
    MSE_2 = np.mean((Y_test - Y_pred)**2)
    RMSE = np.sqrt(MSE)
    R2 = r2_score(Y_test, Y_pred)

    # Normalized MSE: residual sum of squares over total sum of squares
    Y_test_mean = np.array([np.mean(Y_test)])
    NMSE = (np.sum((Y_pred-Y_test)**2))/(np.sum((Y_test-Y_test_mean)**2))

    print("MSE: " + str(MSE))
    print("MSE_2: " + str(MSE_2))
    print("RMSE: " + str(RMSE))
    print("R2: " + str(R2))
    print("NMSE: " + str(NMSE))

    return rfr_model


# Default filenames used by get_model()/main() for persisting the model and
# its results. NOTE(review): rfr_result_name is never read in this file.
rfr_file_name = "rfr.joblib"
rfr_result_name = "rfr_result.csv"

def get_model():
    """Return a regression model for training.

    If a saved model file exists, the user may choose to load it instead of
    building a new one; otherwise a fresh GridSearchCV over a
    RandomForestRegressor (5-fold CV, fixed best-parameter grid) is built.
    """
    if os.path.exists(rfr_file_name):
        build_new = input("Exists a model, build a new? y/n:")
        if build_new == "n":
            print("Load exists model")
            return joblib.load(rfr_file_name)
    print("Build new model")
    base_estimator = ensemble.RandomForestRegressor()
    return GridSearchCV(estimator=base_estimator, param_grid=get_rfr_best_param_grid(), cv=5)


def main():
    """Interactive driver: repeatedly train the model on fresh data until
    the user declines, optionally saving the model after each round.

    Side effects: prompts on stdin; may write the model to rfr_file_name.
    """
    rfr = get_model()

    while True:
        # Typo fixed: was `tarin_start`; the KEEP_GOING flag dance is
        # replaced by a plain loop with a break.
        train_start = input("Start train model? y/n:")
        if train_start != "y":
            break

        rfr = train_v3(rfr)
        print(rfr.best_params_)

        # Save on request. Bug fixed: the original branch contained the bare
        # name `module` (a NameError) and shadowed the module-level
        # save_model() with a local string.
        answer = input("Save Model? y/n:")
        if answer == "y":
            joblib.dump(rfr, rfr_file_name)

# Script entry point: run the interactive training loop.
if __name__ == "__main__":
    # main()
    train_v2()
    print("Finish!")


















