# -*- coding: utf-8 -*-

import pandas as pd
import time
import numpy as np
from sklearn.model_selection import GridSearchCV, cross_val_score, train_test_split
import sklearn.ensemble as ensemble
import sklearn.utils as su
from sklearn.metrics import mean_squared_error, r2_score
import joblib
import os


# c18-22
def getData_Lv5(count, startIndex):
    """Generate `count` synthetic Level-5 samples.

    c_23 is drawn uniformly from [80, 800) and c_24 from [90, 900).
    Each row is [index, c_23, c_24, 5] with index starting at
    `startIndex`. Returns (rows, [[c_23_max, c_23_min],
    [c_24_max, c_24_min]]).
    """
    c_23 = np.random.rand(count) * 720 + 80
    c_24 = np.random.rand(count) * 810 + 90
    data_max_min = [
        [np.max(c_23), np.min(c_23)],
        [np.max(c_24), np.min(c_24)],
    ]
    data_set = [
        [startIndex + i, float(c_23[i]), float(c_24[i]), 5]
        for i in range(count)
    ]
    return (data_set, data_max_min)

def getData_Lv4(count, startIndex):
    """Generate `count` synthetic Level-4 samples.

    c_23 ~ U[60, 80), c_24 ~ U[80, 90). Each row is
    [index, c_23, c_24, 4]. Returns (rows, per-feature [max, min]).
    """
    c_23 = np.random.rand(count) * 20 + 60
    c_24 = np.random.rand(count) * 10 + 80
    data_max_min = [[c_23.max(), c_23.min()], [c_24.max(), c_24.min()]]
    data_set = []
    for i, (a, b) in enumerate(zip(c_23, c_24)):
        data_set.append([startIndex + i, float(a), float(b), 4])
    return (data_set, data_max_min)

def getData_Lv3(count, startIndex):
    """Generate `count` synthetic Level-3 samples.

    c_23 ~ U[40, 60), c_24 ~ U[60, 80). Each row is
    [index, c_23, c_24, 3]. Returns (rows, per-feature [max, min]).
    """
    c_23 = np.random.rand(count) * 20 + 40
    c_24 = np.random.rand(count) * 20 + 60
    data_max_min = [[np.max(c_23), np.min(c_23)],
                    [np.max(c_24), np.min(c_24)]]
    indices = range(startIndex, startIndex + count)
    data_set = [[idx, float(a), float(b), 3]
                for idx, a, b in zip(indices, c_23, c_24)]
    return (data_set, data_max_min)

def getData_Lv2(count, startIndex):
    """Generate `count` synthetic Level-2 samples.

    c_23 ~ U[20, 40), c_24 ~ U[30, 60). Each row is
    [index, c_23, c_24, 2]. Returns (rows, per-feature [max, min]).
    """
    c_23 = np.random.rand(count) * 20 + 20
    c_24 = np.random.rand(count) * 30 + 30
    data_max_min = []
    for feature in (c_23, c_24):
        data_max_min.append([np.max(feature), np.min(feature)])
    data_set = []
    for i in range(count):
        data_set.append([startIndex + i, float(c_23[i]), float(c_24[i]), 2])
    return (data_set, data_max_min)

def getData_Lv1(count, startIndex):
    """Generate `count` synthetic Level-1 samples.

    c_23 ~ U[2, 20), c_24 ~ U[3, 30). Each row is
    [index, c_23, c_24, 1]. Returns (rows, per-feature [max, min]).
    """
    c_23 = np.random.rand(count) * 18 + 2
    c_24 = np.random.rand(count) * 27 + 3
    data_max_min = [
        [c_23.max(), c_23.min()],
        [c_24.max(), c_24.min()],
    ]
    data_set = [
        [startIndex + i, float(a), float(b), 1]
        for i, (a, b) in enumerate(zip(c_23, c_24))
    ]
    return (data_set, data_max_min)

# Fixed min-max normalization bounds shared by preHandleData_L7 and
# preHandleData. They span the full range the level generators can emit:
# c_23 up to 800 (Level 5 upper bound) down to 2 (Level 1 lower bound),
# c_24 up to 900 down to 3.
c_23_max = 800.0
c_23_min = 2.0

c_24_max = 900.0
c_24_min = 3.0

def preHandleData_L7(data):
    """Min-max normalize one 4-element row [index, c_23, c_24, level].

    Only the two feature columns are rescaled, using the module-level
    c_23_*/c_24_* bounds; index and level pass through unchanged.
    """
    scaled_23 = (data[1] - c_23_min) / (c_23_max - c_23_min)
    scaled_24 = (data[2] - c_24_min) / (c_24_max - c_24_min)
    return [data[0], scaled_23, scaled_24, data[3]]

def preHandleData(data):
    """Min-max normalize a 2-element feature pair [c_23, c_24]
    using the module-level c_23_*/c_24_* bounds.
    """
    span_23 = c_23_max - c_23_min
    span_24 = c_24_max - c_24_min
    return [(data[0] - c_23_min) / span_23,
            (data[1] - c_24_min) / span_24]

def get_train_param_grid():
    """Full hyper-parameter search space for the RandomForest grid search.

    Bug fix: scikit-learn renamed the RandomForestRegressor criteria
    'mae'/'mse' to 'absolute_error'/'squared_error' (deprecated in 1.0,
    removed in 1.2); the old names raise a ValueError on current releases.
    """
    param_grid = {
        'criterion': ['absolute_error', 'squared_error'],
        'n_estimators': [800, 1000],
        'max_features': ['sqrt'],
        'max_depth': [4, 5],
        'min_samples_split': [8, 12, 16]
    }
    return param_grid

def get_best_train_param_grid():
    """Single-point grid holding the best parameters found previously.

    Bug fix: 'mae' was renamed to 'absolute_error' in scikit-learn
    (deprecated in 1.0, removed in 1.2); the old name raises a
    ValueError on current releases.
    """
    param_grid = {
        'criterion': ['absolute_error'],
        'n_estimators': [1000],
        'max_features': ['sqrt'],
        'max_depth': [4],
        'min_samples_split': [12]
    }
    return param_grid

def get_train_data():
    """Build the normalized training set.

    Generates 100 samples for each level (5 down to 1, with
    consecutive index blocks) and min-max normalizes every row via
    preHandleData_L7. Returns a flat list of 500 rows.
    """
    generators = [
        (getData_Lv5, 1),
        (getData_Lv4, 101),
        (getData_Lv3, 201),
        (getData_Lv2, 301),
        (getData_Lv1, 401),
    ]
    data_all = []
    for make_level, start in generators:
        rows, _ = make_level(100, start)
        data_all.extend(preHandleData_L7(row) for row in rows)
    return data_all

# Default filenames for persisting the trained model and its results.
model_name = "water_resource.joblib"
result_name = "water_resource_result.joblib"
# Column layout of each row produced by get_train_data().
column_names = ['index', 'c_23', 'c_24', 'level']

def get_model():
    """Return the model to work with.

    If a saved model file exists and the user declines a rebuild,
    load and return it; otherwise construct a fresh GridSearchCV over
    a RandomForestRegressor using the best-known parameter grid.

    Bug fix: the original printed "Load exists model" but always fell
    through and built a new model regardless of the answer; it now
    actually loads the saved file.
    """
    if os.path.exists(model_name):
        build_new = input("Exists a model, build a new? y/n:")
        if build_new == "n":
            print("Load exists model")
            return joblib.load(model_name)
    print("Build new model")
    return GridSearchCV(estimator=ensemble.RandomForestRegressor(),
                        param_grid=get_best_train_param_grid(), cv=5)

def train_model(rfr_model):
    """Fit `rfr_model` on the synthetic training set and report metrics.

    Builds normalized data via get_train_data(), shuffles it, splits
    60/40 into train/test, fits the estimator, runs 10-fold CV on the
    test split, and prints MSE, RMSE, R2 and NMSE on the held-out data.

    Args:
        rfr_model: an unfitted sklearn estimator (here a GridSearchCV
            wrapping a RandomForestRegressor).

    Returns:
        The fitted estimator.
    """
    # Prepare the data.
    data_init = get_train_data()
    df = pd.DataFrame(data_init, columns=column_names)
    Y = df["level"]
    X = df.iloc[:, 1:-1]  # keep only the two feature columns

    X, Y = su.shuffle(X, Y, random_state=7)
    X_train, X_test, Y_train, Y_test = train_test_split(
        X, Y, test_size=0.4, random_state=123)

    # Train.
    start_time = time.localtime()
    print(str.format("开始训练：{0}", time.asctime(start_time)))
    rfr_model.fit(X_train, Y_train)
    end_time = time.localtime()
    print(str.format("结束训练：{0}", time.asctime(end_time)))

    # Cross-validation. NOTE: this clones and refits the whole
    # GridSearchCV 10 times on the *test* split — expensive, but it
    # scores generalization on data the fitted model never saw.
    start_cross_val_time = time.localtime()
    print(str.format("开始交叉验证：{0}", time.asctime(start_cross_val_time)))
    score = cross_val_score(rfr_model, X_test, Y_test,
                            scoring='neg_mean_squared_error', cv=10).mean()
    end_cross_val_time = time.localtime()
    print(str.format("结束交叉验证：{0}", time.asctime(end_cross_val_time)))
    # Bug fix: `score` was computed at significant cost but never
    # reported; print it so the 10-fold CV result is visible.
    print("CV neg MSE: " + str(score))

    Y_pred = rfr_model.predict(X_test)

    # Accuracy metrics on the held-out split.
    MSE = mean_squared_error(Y_test, Y_pred)
    MSE_2 = np.mean((Y_test - Y_pred) ** 2)  # manual check of MSE
    RMSE = np.sqrt(MSE)
    R2 = r2_score(Y_test, Y_pred)
    # Normalized MSE: residual sum of squares over total sum of squares.
    NMSE = np.sum((Y_pred - Y_test) ** 2) / np.sum((Y_test - np.mean(Y_test)) ** 2)

    print("MSE: " + str(MSE))
    print("MSE_2: " + str(MSE_2))
    print("RMSE: " + str(RMSE))
    print("R2: " + str(R2))
    print("NMSE: " + str(NMSE))

    return rfr_model

# Module-level accumulators: predict_data() appends each boundary
# prediction here so repeated runs can be inspected afterwards.
temp_5 = []
temp_4 = []
temp_3 = []
temp_2 = []

def predict_data(rfr_model):
    """Predict level scores at the class boundaries and for three
    scenario years, print the resulting ranges, and return them.

    Each boundary prediction is also appended to the module-level
    temp_* lists. Returns a dict with the printed Lv_1..Lv_5 range
    strings and the pre_2015/pre_2020/pre_2030 predictions.
    """
    # Simulated boundary inputs, normalized then scored by the model.
    pre_5 = rfr_model.predict([preHandleData([80, 90])])
    pre_4 = rfr_model.predict([preHandleData([60, 80])])
    pre_3 = rfr_model.predict([preHandleData([40, 60])])
    pre_2 = rfr_model.predict([preHandleData([20, 30])])

    temp_5.append(pre_5)
    temp_4.append(pre_4)
    temp_3.append(pre_3)
    temp_2.append(pre_2)

    label_5 = "Lv.5: >" + str(pre_5[0])
    label_4 = "Lv.4: (" + str(pre_4[0]) + ", " + str(pre_5[0]) + "]"
    label_3 = "Lv.3: (" + str(pre_3[0]) + ", " + str(pre_4[0]) + "]"
    label_2 = "Lv.2: (" + str(pre_2[0]) + ", " + str(pre_3[0]) + "]"
    label_1 = "Lv.1: <" + str(pre_2[0])
    for label in (label_5, label_4, label_3, label_2, label_1):
        print(label)

    # Scenario inputs for the years 2015, 2020 and 2030.
    year_predictions = {}
    for year, raw in (("2015", [40, 50]), ("2020", [70, 80]), ("2030", [90, 95])):
        value = rfr_model.predict([preHandleData(raw)])[0]
        print(year + ": " + str(value))
        year_predictions["pre_" + year] = value

    result = {
        'Lv_5': label_5,
        'Lv_4': label_4,
        'Lv_3': label_3,
        'Lv_2': label_2,
        'Lv_1': label_1,
    }
    result.update(year_predictions)
    return result

def main():
    """Interactive driver.

    Obtains a model, then loops: asks whether to train, trains and
    reports, runs the boundary/scenario predictions, and optionally
    saves the model under a user-supplied filename.
    """
    rfr = get_model()

    while True:
        train_start = input("Start train model? y/n:")
        if train_start != "y":
            break

        rfr = train_model(rfr)
        print(rfr.best_params_)

        predict_data(rfr)

        save_model = input("Save Model? y/n:")
        if save_model == "y":
            save_name = input("Filename? :")
            # Bug fix: joblib.dump's signature is (value, filename);
            # the original had the arguments swapped, so the model was
            # never saved correctly.
            joblib.dump(rfr, save_name)

# Script entry point.
if __name__ == "__main__":
    main()








