'''
Description: Model-training helpers (LightGBM / CatBoost / sklearn regressors).
Version: 2.0
Author: yechunhong
Date: 2022-07-18 09:42:40
LastEditors: yechunhong
LastEditTime: 2022-08-01 16:26:51
'''
import sys
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler
from sklearn.linear_model import Lasso,LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import lightgbm
import matplotlib.pyplot as plt
# import seaborn as sns
import os
import pandas as pd
import numpy as np
import catboost
def lgb_train(X_train, Y_train, args):
    """Train a LightGBM regressor on a chronological train/eval split.

    Args:
        X_train: Full feature matrix (time-ordered rows).
        Y_train: Full label vector aligned with ``X_train``.
        args: Config object providing ``train_ratio``, ``label_range``,
            ``ml_param1`` (booster params, including ``'metric'``),
            ``ml_param2`` (``num_boost_round`` / ``early_stopping_rounds`` /
            ``verbose_eval``), ``feature_list`` and ``result_root``.

    Returns:
        The trained ``lightgbm.Booster``.

    Side effects:
        Writes a feature-importance CSV, a metric CSV and a metric plot PNG
        under ``args.result_root``.
    """
    # Chronological split; the extra `label_range` rows are trimmed from the
    # train portion — presumably to keep forward-looking labels out of the
    # training set (TODO confirm against how labels are built).
    idx = int(len(X_train) * args.train_ratio - args.label_range)
    x_train = X_train[:idx]
    y_train = Y_train[:idx]
    x_test = X_train[idx:]
    y_test = Y_train[idx:]
    print(f'实际数据量：train{len(x_train)}, eval:{len(x_test)}')
    ml_param1 = args.ml_param1
    ml_param2 = args.ml_param2
    feature_columns = args.feature_list
    root = args.result_root
    # Renamed from `lgb_train` — the original local shadowed this function's
    # own name, making recursive/later references to the function impossible.
    train_set = lightgbm.Dataset(x_train, label=y_train)
    eval_set = lightgbm.Dataset(x_test, label=y_test, reference=train_set)
    evals_result = {}  # filled in by lightgbm with per-iteration metrics
    # NOTE(review): the `early_stopping_rounds` / `evals_result` /
    # `verbose_eval` kwargs were removed in LightGBM 4.x; when the pinned
    # LightGBM version is upgraded, migrate to callbacks
    # (lightgbm.early_stopping / record_evaluation / log_evaluation).
    gbm_model = lightgbm.train(
        ml_param1,
        train_set,
        valid_sets=[train_set, eval_set],
        num_boost_round=ml_param2['num_boost_round'],
        early_stopping_rounds=ml_param2['early_stopping_rounds'],
        evals_result=evals_result,
        verbose_eval=ml_param2['verbose_eval'],
    )

    feature_importance_df = pd.DataFrame({
        "column": feature_columns,
        "importance": gbm_model.feature_importance(),
    })
    feature_importance_df = feature_importance_df.sort_values(
        by=['importance'], ascending=False)
    # Filename keeps the historical misspelling ("importantce") so any
    # downstream reader expecting it still works — TODO confirm before fixing.
    feature_importance_df.to_csv(f'{root}/lgb_importantce.csv')
    metric_df = pd.DataFrame({
        "train": evals_result['training'][ml_param1['metric']],
        "valid": evals_result['valid_1'][ml_param1['metric']],
    })
    metric_df.plot(figsize=(16, 8))
    plt.savefig(f'{root}/lgb_metric.png')
    plt.close()
    metric_df.to_csv(f'{root}/lgb_metric.csv')
    return gbm_model

def random_forest_train(x_train, y_train, args):
    """Fit a RandomForestRegressor and print in-sample RMSE and IC.

    Args:
        x_train: Training feature matrix.
        y_train: Training target vector.
        args: Unused; kept for signature parity with the other ``*_train``
            helpers in this module.

    Returns:
        The fitted ``RandomForestRegressor``.
    """
    model = RandomForestRegressor(n_jobs=-1)  # **param_dic
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    # Bug fix: mean_squared_error already averages over samples, so the old
    # `mse / len(pred)` printed neither MSE nor RMSE. Report true RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    # IC: Pearson correlation between predictions and targets.
    print('IC in train:', np.corrcoef(pred_train, y_train)[0, 1])
    return model

def liner_train(x_train, y_train, args):
    """Fit an OLS LinearRegression (no intercept) and print train metrics.

    Args:
        x_train: Training feature matrix.
        y_train: Training target vector.
        args: Unused; kept for signature parity with the other ``*_train``
            helpers in this module.

    Returns:
        The fitted ``LinearRegression`` model.
    """
    # Bug fix: the `normalize=` kwarg was removed from LinearRegression in
    # scikit-learn 1.2 and raised a TypeError. The old value (False) matches
    # the current default, so dropping it preserves behavior.
    model = LinearRegression(fit_intercept=False)
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    # Bug fix: mean_squared_error already averages over samples, so the old
    # `mse / len(pred)` printed neither MSE nor RMSE. Report true RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    # IC: Pearson correlation between predictions and targets.
    print('IC in train:', np.corrcoef(pred_train, y_train)[0, 1])
    return model

def lasso_train(x_train, y_train, args):
    """Fit a Lasso regressor (no intercept) and print train metrics.

    Args:
        x_train: Training feature matrix.
        y_train: Training target vector.
        args: Unused; kept for signature parity with the other ``*_train``
            helpers in this module.

    Returns:
        The fitted ``Lasso`` model.
    """
    # Bug fix: the `normalize=` kwarg was removed from Lasso in
    # scikit-learn 1.2 and raised a TypeError. The old value (False) matches
    # the current default, so dropping it preserves behavior.
    model = Lasso(fit_intercept=False)
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    # Bug fix: mean_squared_error already averages over samples, so the old
    # `mse / len(pred)` printed neither MSE nor RMSE. Report true RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    # IC: Pearson correlation between predictions and targets.
    print('IC in train:', np.corrcoef(pred_train, y_train)[0, 1])
    return model

def catboost_train(x_train, x_test, y_train, y_test, args):
    """Train a CatBoostRegressor with early stopping on a held-out set.

    Args:
        x_train: Training feature matrix.
        x_test: Validation feature matrix used as the eval set.
        y_train: Training target vector.
        y_test: Validation target vector.
        args: Config object providing ``model_param1`` (CatBoostRegressor
            constructor kwargs) and ``model_param2``
            (``early_stopping_rounds`` / ``verbose_eval``).

    Returns:
        The fitted ``catboost.CatBoostRegressor``.
    """
    # NOTE(review): this reads args.model_param1/model_param2 while
    # lgb_train reads args.ml_param1/ml_param2 — confirm the config really
    # exposes both attribute families before unifying the names.
    ml_param1 = args.model_param1
    ml_param2 = args.model_param2
    train_pool = catboost.Pool(x_train, y_train)
    validate_pool = catboost.Pool(x_test, y_test)
    model = catboost.CatBoostRegressor(**ml_param1)
    model.fit(
        train_pool,
        eval_set=validate_pool,
        early_stopping_rounds=ml_param2['early_stopping_rounds'],
        verbose=ml_param2['verbose_eval'],
    )
    return model

    
# def cat_importance( model, feature_columns):#修改合并
#     feature_importances = model.get_feature_importance(train_pool)
#     feature_names = feature_columns




def xgboost_train( x_train, x_test, y_train, y_test, args):
    """Placeholder for XGBoost training — not implemented; returns None.

    Signature mirrors catboost_train for drop-in use once implemented.
    """
    pass

def mlp_train( x_train, x_test, y_train, y_test, args):
    """Placeholder for MLP training — not implemented; returns None.

    Signature mirrors catboost_train for drop-in use once implemented.
    """
    pass

