'''
Description:  
Version: 2.0
Author: yechunhong
Date: 2022-07-18 09:42:40
LastEditors: yechunhong
LastEditTime: 2022-08-01 16:26:51
'''
import sys
from sklearn.preprocessing import MinMaxScaler, LabelEncoder, StandardScaler
from sklearn.linear_model import Lasso,LinearRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_squared_error
import lightgbm
import matplotlib.pyplot as plt
# import seaborn as sns
import os
import pandas as pd
import numpy as np
import json
import catboost
from sklearn.model_selection import GridSearchCV
from training.ml_frame_class.month_rolling.lgbm_parm_tune import *

from training.ml_frame_class.month_rolling.lgb_loss import my_weight_loss, my_weight_metric



def lgb_train(x_train, x_test, y_train, y_test, args, root):
    """Train a LightGBM regressor, dump diagnostics, optionally tune params.

    Parameters
    ----------
    x_train, x_test, y_train, y_test : array-like train / eval splits.
    args : namespace providing ml_param1 (booster params), ml_param2
        (num_boost_round / early_stopping_rounds / verbose_eval),
        feature_list, result_root and tune_param.
    root : output directory for the metric plot and tuned-param json.
        NOTE(review): some artifacts go to `root`, others to
        `args.result_root` — confirm both destinations are intended.

    Returns
    -------
    lightgbm.Booster
        The trained model (retrained with tuned params when
        args.tune_param is truthy).
    """
    print(f'实际数据量：train:{len(x_train)}, eval:{len(x_test)}')
    # Renamed from `lgb_train` so the local datasets no longer shadow
    # this function's own name.
    train_set = lightgbm.Dataset(x_train, label=y_train)
    eval_set = lightgbm.Dataset(x_test, label=y_test, reference=train_set)
    evals_result = {}  # filled by lightgbm with per-iteration metric values
    gbm_model = lightgbm.train(
        args.ml_param1,
        train_set,
        valid_sets=[train_set, eval_set],
        num_boost_round=args.ml_param2['num_boost_round'],
        early_stopping_rounds=args.ml_param2['early_stopping_rounds'],
        evals_result=evals_result,
        verbose_eval=args.ml_param2['verbose_eval'],
        # fobj=my_weight_loss,
        # feval=my_weight_metric,
    )

    feature_importance_df = pd.DataFrame({
        "column": args.feature_list,
        "importance": gbm_model.feature_importance(),
    })
    feature_importance_df = feature_importance_df.sort_values(by=['importance'], ascending=False)
    # File name (with its "importantce" spelling) kept unchanged so any
    # downstream reader of this artifact keeps working.
    feature_importance_df.to_csv(f'{args.result_root}/lgb_importantce.csv')
    metric_df = pd.DataFrame({
        "train": evals_result['training'][args.ml_param1['metric']],
        "valid": evals_result['valid_1'][args.ml_param1['metric']],
    })
    metric_df.plot(figsize=(16, 8))
    plt.savefig(f'{root}/lgb_metric.png')
    plt.close()
    metric_df.to_csv(f'{args.result_root}/lgb_metric.csv')

    if args.tune_param:
        # Hyper-parameter search, then retrain with the best params merged in.
        tr, best_param = param_tune(x_train, x_test, y_train, y_test, args)
        tune_plot(tr, root)
        ml_param1 = args.ml_param1
        ml_param1.update(best_param)  # NOTE: mutates args.ml_param1 in place
        gbm_model = lightgbm.train(
            ml_param1,
            train_set,
            valid_sets=[train_set, eval_set],
            num_boost_round=args.ml_param2['num_boost_round'],
            early_stopping_rounds=args.ml_param2['early_stopping_rounds'],
            evals_result=evals_result,
            verbose_eval=args.ml_param2['verbose_eval'],
        )

        with open(f'{root}/lgb_best_param.json', "w", encoding='utf-8') as f:
            json.dump(best_param, f, indent=2, sort_keys=True, ensure_ascii=False)  # multi-line output

    return gbm_model

def liner_train(x_train, x_test, y_train, y_test, args):
    """Fit an ordinary least-squares model on train+test combined.

    NOTE(review): the test split is folded into the training data before
    fitting, so the "test" metrics below are in-sample — confirm this is
    the intended final-refit behavior.

    Returns the fitted sklearn LinearRegression model.
    """
    x_train = np.concatenate([x_train, x_test], axis=0)
    y_train = np.concatenate([y_train, y_test], axis=0)
    # `normalize=` was removed from scikit-learn 1.2; False was its default,
    # so dropping it preserves behavior on old versions and fixes a
    # TypeError on current ones.
    model = LinearRegression(fit_intercept=False, n_jobs=20)
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    pred_test = model.predict(x_test)
    # Print an actual RMSE: the original printed MSE / n under the "rmse"
    # label, which is neither MSE nor RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    print('rmse in test:', np.sqrt(mean_squared_error(y_test, pred_test)))
    print('IC in test:', np.corrcoef(x=pred_test, y=y_test)[0, 1])
    return model

def lasso_train(x_train, x_test, y_train, y_test, args):
    """Fit a Lasso model on train+test combined.

    NOTE(review): the test split is folded into the training data before
    fitting, so the "test" metrics below are in-sample — confirm intended.

    Returns the fitted sklearn Lasso model.
    """
    x_train = np.concatenate([x_train, x_test], axis=0)
    y_train = np.concatenate([y_train, y_test], axis=0)
    # Bug fixes: sklearn's Lasso has no `n_jobs` parameter (the original
    # raised TypeError on construction), and `normalize=` was removed in
    # scikit-learn 1.2 (False was the default, so dropping it is safe).
    model = Lasso(fit_intercept=False)
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    pred_test = model.predict(x_test)
    # Print an actual RMSE: the original printed MSE / n under the "rmse"
    # label, which is neither MSE nor RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    print('rmse in test:', np.sqrt(mean_squared_error(y_test, pred_test)))
    print('IC in test:', np.corrcoef(x=pred_test, y=y_test)[0, 1])
    return model

def random_forest_train(x_train, x_test, y_train, y_test, args):
    """Fit a random-forest regressor on train+test combined.

    NOTE(review): the test split is folded into the training data before
    fitting, so the "test" metrics below are in-sample — confirm intended.

    Returns the fitted sklearn RandomForestRegressor.
    """
    x_train = np.concatenate([x_train, x_test], axis=0)
    y_train = np.concatenate([y_train, y_test], axis=0)
    model = RandomForestRegressor(n_jobs=20)  # **param_dic could be threaded here
    model.fit(x_train, y_train)
    pred_train = model.predict(x_train)
    pred_test = model.predict(x_test)
    # Print an actual RMSE: the original printed MSE / n under the "rmse"
    # label, which is neither MSE nor RMSE.
    print('rmse in train:', np.sqrt(mean_squared_error(y_train, pred_train)))
    print('rmse in test:', np.sqrt(mean_squared_error(y_test, pred_test)))
    print('IC in test:', np.corrcoef(x=pred_test, y=y_test)[0, 1])
    return model

    

def catboost_train(x_train, x_test, y_train, y_test, ml_param):
    """Fit a CatBoost regressor with early stopping on the eval pool.

    `ml_param` is a dict with 'param1' (CatBoostRegressor constructor
    kwargs) and 'param2' (fit controls: early_stopping_rounds /
    verbose_eval). Returns the fitted regressor.
    """
    booster_params = ml_param['param1']
    control_params = ml_param['param2']
    training_pool = catboost.Pool(x_train, y_train)
    eval_pool = catboost.Pool(x_test, y_test)
    regressor = catboost.CatBoostRegressor(**booster_params)
    regressor.fit(
        training_pool,
        eval_set=eval_pool,
        early_stopping_rounds=control_params['early_stopping_rounds'],
        verbose=control_params['verbose_eval'],
    )
    return regressor

    
# def cat_importance( model, feature_columns):#修改合并
#     feature_importances = model.get_feature_importance(train_pool)
#     feature_names = feature_columns


def xgboost_train( x_train, x_test, y_train, y_test, args):
    """Placeholder for an XGBoost trainer — not implemented yet (returns None)."""
    pass

def mlp_train( x_train, x_test, y_train, y_test,args):
    """Placeholder for an MLP trainer — not implemented yet (returns None)."""
    pass



def IC_lgb(pred, label):
    """Custom LightGBM objective: gradient/hessian of the Pearson IC.

    Parameters
    ----------
    pred : np.ndarray of predictions (1-D).
    label : np.ndarray of targets, same length as `pred`.

    Returns
    -------
    (grad, hess) : pair of np.ndarray, each scaled by `constant`.
    """
    n = len(pred)
    # Bug fix: this line was commented out in the original while `constant`
    # was still referenced in the return statement, causing a NameError.
    constant = 1 / ((np.std(label)) / np.sqrt(1 / n))
    e_x = pred.sum() / n
    e_y = label.sum() / n
    sum_x_minus_e_x = np.square(pred - e_x).sum()
    # Loop-invariant covariance term hoisted out of the per-sample loop
    # (the original recomputed it on every iteration, accidental O(n^2)).
    cov_x_y_plus_n = np.sum((pred - e_x) * (label - e_y))
    grad = []
    hess = []
    for i in range(n):
        w_i = label[i] - e_y
        z_i = pred[i] - e_x
        grad_i = (w_i * np.sqrt(sum_x_minus_e_x) - (z_i * cov_x_y_plus_n / np.sqrt(sum_x_minus_e_x))) / sum_x_minus_e_x
        grad.append(grad_i)
        hess_i_first_term = w_i * z_i / np.sqrt(sum_x_minus_e_x) - ((cov_x_y_plus_n - w_i * z_i) * np.sqrt(sum_x_minus_e_x) - (np.square(z_i) * cov_x_y_plus_n / np.sqrt(sum_x_minus_e_x)))
        hess_i_second_term = 2 * z_i * grad_i
        hess_i = (hess_i_first_term - hess_i_second_term) / np.square(sum_x_minus_e_x)
        hess.append(hess_i)
    return np.array(grad) * constant, np.array(hess) * constant


def IC_loss(var_a: np.ndarray, var_b: np.ndarray):
    """Custom LightGBM eval metric: Pearson correlation (IC) of two series.

    Returns the (name, value, is_higher_better) triple expected by
    LightGBM's `feval` interface. Annotation fixed: `np.array` is a
    factory function, `np.ndarray` is the actual array type.
    """
    ic = np.corrcoef(var_a, var_b)[0][1]
    is_higher_better = True  # a larger IC indicates better ranking power
    return 'IC_loss', ic, is_higher_better