import copy
import os
import sys
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.naive_bayes import MultinomialNB
from sklearn.naive_bayes import BernoulliNB
from sklearn.naive_bayes import GaussianNB
from sklearn.tree import DecisionTreeClassifier
from sklearn.tree import DecisionTreeRegressor
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.ensemble import AdaBoostRegressor
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import RandomizedSearchCV
from scipy.stats import uniform


CURRENT_PATH = os.getcwd()
sys.path.append(CURRENT_PATH)
from leads_rating_eval import validate, pre_auc, model_evaluate
from leads_rating_ETL import pre_labeler, pre_ohe, data_cut
from utils import class_label2
import pandas as pd
import numpy as np
from sklearn.externals import joblib

def RF_Classifier(train_x, train_y, parameters, scoring='roc_auc', cv_n=5):
    """Grid-search a RandomForestClassifier and return the best estimator.

    :param train_x: training features
    :param train_y: training labels
    :param parameters: hyper-parameter grid for RandomForestClassifier
    :param scoring: scoring metric for the grid search (default 'roc_auc')
    :param cv_n: number of cross-validation folds
    :return: fitted RandomForestClassifier (best model found by the search)
    """
    RFC_Model = RandomForestClassifier()
    # Pass scoring by keyword: it is keyword-only in modern scikit-learn,
    # while the original positional form only worked on old versions.
    grid_search = GridSearchCV(RFC_Model, parameters, scoring=scoring, cv=cv_n)
    grid_search.fit(train_x, train_y)
    RFC_Model = grid_search.best_estimator_
    print('Best Estimator: %s' % (str(RFC_Model)))
    print('Best Score (%s): %f' % (scoring, grid_search.best_score_))
    # best_estimator_ is already refit on (train_x, train_y) because
    # refit=True is the GridSearchCV default, so no extra fit is needed.
    return RFC_Model



def AdBoost_Classifier(train_x, train_y, parameters, scoring='roc_auc', cv_n=5):
    """Grid-search an AdaBoostClassifier (decision-tree base learner) and
    return the best estimator.

    :param train_x: training features
    :param train_y: training labels
    :param parameters: hyper-parameter grid for AdaBoostClassifier
    :param scoring: scoring metric for the grid search (default 'roc_auc')
    :param cv_n: number of cross-validation folds
    :return: fitted AdaBoostClassifier (best model found by the search)
    """
    dtc = DecisionTreeClassifier()
    AdBoostC_Model = AdaBoostClassifier(base_estimator=dtc)
    # scoring passed by keyword: keyword-only in modern scikit-learn.
    grid_search = GridSearchCV(AdBoostC_Model, parameters, scoring=scoring, cv=cv_n)
    grid_search.fit(train_x, train_y)
    AdBoostC_Model = grid_search.best_estimator_
    print('Best Estimator: %s' % (str(AdBoostC_Model)))
    print('Best Score (%s): %f' % (scoring, grid_search.best_score_))
    # best_estimator_ is already refit on the full training data
    # (refit=True default), so the original extra fit was redundant.
    return AdBoostC_Model


# Hyper-parameter grid for GBDT_Classifier (GradientBoostingClassifier).
# Mostly single-valued: only learning_rate and min_samples_leaf are searched.
parameters_gbdt = {
    'n_estimators': [60],
    'learning_rate': [0.2, 0.4, 0.6],
    'subsample': [0.7],
    'max_features': [0.7],
    'min_samples_split': [15],
    'min_samples_leaf': [20, 15],
    'max_depth': [2],
    'random_state': [180]}

def GBDT_Classifier(train_x, train_y, parameters, scoring='roc_auc', cv_n=3):
    """Grid-search a GradientBoostingClassifier and return the best estimator.

    :param train_x: training features
    :param train_y: training labels
    :param parameters: hyper-parameter grid (e.g. parameters_gbdt above)
    :param scoring: scoring metric for the grid search (default 'roc_auc')
    :param cv_n: number of cross-validation folds
    :return: fitted GradientBoostingClassifier (best model found by the search)
    """
    GBDT_Cmodel = GradientBoostingClassifier(random_state=180)
    # scoring passed by keyword: keyword-only in modern scikit-learn.
    grid_search = GridSearchCV(GBDT_Cmodel, parameters, scoring=scoring, cv=cv_n)
    grid_search.fit(train_x, train_y)
    GBDT_Cmodel = grid_search.best_estimator_
    print('Best Estimator: %s' % (str(GBDT_Cmodel)))
    print('Best Score (%s): %f' % (scoring, grid_search.best_score_))
    # best_estimator_ is already refit on the full training data
    # (refit=True default), so the original extra fit was redundant.
    return GBDT_Cmodel

# Hyper-parameter grid for LR_Classifier (LogisticRegression).
# NOTE(review): the original grid had 'penalty': ['l1', 'l1'] — the same
# value twice, so every C candidate was evaluated twice for no benefit.
# Deduplicated to ['l1'] (identical search result, half the work).
# Possibly ['l1', 'l2'] was intended — confirm with the author.
parameters_LR = {'C': [0.1, 0.3, 0.5, 0.7, 0.9],
                 'max_iter': [9000], 'penalty': ['l1'], 'random_state': [180]}

def LR_Classifier(train_x, train_y, parameters, scoring='roc_auc', cv_n=3):
    """Grid-search a LogisticRegression model and return the best estimator.

    :param train_x: training features
    :param train_y: training labels
    :param parameters: hyper-parameter grid (e.g. parameters_LR above)
    :param scoring: scoring metric for the grid search (default 'roc_auc')
    :param cv_n: number of cross-validation folds
    :return: fitted LogisticRegression (best model found by the search)
    """
    # max_iter=9000 here is overridden by the grid when 'max_iter' is searched.
    # scoring passed by keyword: keyword-only in modern scikit-learn.
    grid_search = GridSearchCV(LogisticRegression(max_iter=9000), parameters,
                               scoring=scoring, cv=cv_n)
    grid_search.fit(train_x, train_y)
    LR_Model = grid_search.best_estimator_
    print('Best Estimator: %s' % (str(LR_Model)))
    print('Best Score (%s): %f' % (scoring, grid_search.best_score_))
    # best_estimator_ is already refit on the full training data
    # (refit=True default), so the original extra fit was redundant.
    return LR_Model

def train2result(config: dict,
                 version_info: dict,
                 feature: list,
                 data_pred_list: list,
                 path_dict: dict):
    """
    Train the configured model on the training data and export results.

    Splits the source data into train/test, label-encodes and one-hot
    encodes the features, grid-searches the model named in config["Model"],
    scores the train/test/validate sets, and writes all evaluation tables
    to the Excel workbook at version_info["result"].

    :param config: run configuration (target column, Model spec, split size, output flags)
    :param version_info: file paths for the persisted model / str2int / ohe encoders and result workbook
    :param feature: static_list + dynamic_list (feature column names)
    :param data_pred_list: [source_data, validate_data] DataFrames
    :param path_dict: output directories; uses path_dict["th_path"] for CSV dumps
    :return: 18 values - lift ('验证提升'), recall ('验证召回') and share
             ('线索占比') for the '高' and '低' grades, on the train, test
             and validate sets (in that order)
    """
    # Deep-copy so the caller's DataFrames are never mutated.
    data_pred_list = copy.deepcopy(data_pred_list)
    source_data, validate_data = data_pred_list
    quantile_split_list = config['quantile_split_list']

    # Results on the training and test sets
    # ========================================================================
    # Label-encode; True = fit and persist the encoder to version_info["str2int"].
    str2int_data = pre_labeler(source_data, feature, True, version_info["str2int"])

    X_train, X_test, y_train, y_test = data_cut(str2int_data,
                                                target_column=config["target"],
                                                test_size=config["train_test_split"],
                                                random_state=1)

    # Keep label-encoded copies with the target re-attached, for reporting.
    X_train_target = copy.deepcopy(X_train)
    X_train_target['target'] = y_train

    X_test_target = copy.deepcopy(X_test)
    X_test_target['target'] = y_test

    # One-hot encode: fit on train (True), reuse the fitted encoder on test (False).
    X_train = pre_ohe(X_train, feature, True, version_info["ohe"])
    X_test = pre_ohe(X_test, feature, False, version_info["ohe"])

    # config["Model"] holds (name, train_function, parameter_grid) -
    # see indices [0], [1], [2] used here and below.
    model_func = config["Model"][1]
    model_parameters = config["Model"][2]

    trained_model = model_func(X_train, y_train, model_parameters )

    # Persist the fitted model.
    joblib.dump(trained_model, version_info["model"])


    # Predictions and roc_auc for the data sets
    y_train = y_train.astype(float).astype(int)

    # Copies that will carry the predicted-probability column.
    X_train_target_p1 = copy.deepcopy(X_train_target)
    X_test_target_p1 = copy.deepcopy(X_test_target)

    # all_lift, all_evaluate = model_evaluate(data_prob)
    y_train_pre, auc_train = pre_auc(trained_model, X_train, y_train)
    y_test_pre, auc_test = pre_auc(trained_model, X_test, y_test)



    X_train_target_p1["prob"] = y_train_pre
    X_test_target_p1["prob"] = y_test_pre

    # Tag each row with its origin so the concatenated dump is traceable.
    X_train_target_p1['token'] = 'train_da'
    X_test_target_p1['token'] = 'test_da'

    # 50-bin lift table + 3 evaluation metrics
    train_lift, train_evaluate = model_evaluate(X_train_target_p1 , config , False)  # pd.df [['prob','target']]
    test_lift, test_evaluate = model_evaluate(X_test_target_p1 , config , False)

    X_train_target_p1 = X_train_target_p1.sort_values(by='prob', ascending=True)
    X_test_target_p1 = X_test_target_p1.sort_values(by='prob', ascending=True)

    data_prob = pd.concat([X_train_target_p1, X_test_target_p1], axis=0)
    if config["source_data_prob_output"]:
        data_prob.to_csv(path_dict["th_path"] + "/sc_data_prob.csv")



    train_evaluate.loc[1, 'AUC'] = auc_train
    test_evaluate.loc[1, 'AUC'] = auc_test

    train_evaluate['model'] = config["Model"][0]
    test_evaluate['model'] = config["Model"][0]

    # Results on the validation set
    # ====================================================================
    # Split off y
    y_validate_data = validate_data[config["target"]]

    # Label-encode with the encoder fitted above (False = transform only).
    X_validate_data_label = pre_labeler(validate_data, feature, False, version_info["str2int"])

    # One-hot encode with the fitted encoder.
    validate_data_ohe = pre_ohe(X_validate_data_label, feature, False, version_info["ohe"])

    # Predict and sort
    X_validate_data_label["prob"] = trained_model.predict_proba(validate_data_ohe)[:, 1].astype(float)
    X_validate_data_label["target"] = y_validate_data
    if config["validate_data_prob_output"]:
        X_validate_data_label.to_csv(path_dict["th_path"]+"/val_data_prob.csv")

    X_validate_data_label.sort_values(by='prob', ascending=False, inplace=True)
    X_validate_data_label = X_validate_data_label.iloc[:, :].reset_index()
    X_validate_data_label.drop(['index'], axis=1, inplace=True)

    # Output of validation-set results
    # Take probability thresholds at the configured quantiles

    # quantile_split_list

    prob_info_list = []
    prob_info_name_list = []

    # X_train_target_p1 is sorted ascending by prob (above), so the row at
    # round(len * q) holds the q-quantile probability threshold.
    # NOTE(review): a quantile of 1.0 would index one past the last row -
    # assumes quantile_split_list values are strictly < 1.0; confirm in config.
    for single_quantile in quantile_split_list:
        quantile_token = X_train_target_p1.iloc[int(np.round(len(X_train_target_p1) * single_quantile)),: ]['prob']
        prob_info_list.append(quantile_token)

        # Column name like 'prob_30' for quantile 0.3.
        single_quantile_str = ''.join( ['prob_',str(int(single_quantile * 100))])
        prob_info_name_list.append(single_quantile_str)

    prob_lowest = prob_info_list[0] ; prob_hightest = prob_info_list[-1]

    # Threshold info: one row, one column per quantile.
    prob_info = pd.DataFrame([prob_info_list] , columns = prob_info_name_list)

    train_lift_percentile, _ = model_evaluate(X_train_target_p1, config, True)  # pd.df [['prob','target']]
    test_lift_percentile, _ = model_evaluate(X_test_target_p1, config, True)

    #################################################3

    X_train_target_p1_label = copy.deepcopy(X_train_target_p1)
    X_test_target_p1_label = copy.deepcopy(X_test_target_p1)

    # class_label2 here is used for the summary output;
    # a further function is still needed to output the full grading result.
    X_validate_data_label["评级意向_占比"] = X_validate_data_label["prob"].apply(lambda x: class_label2(x, prob_lowest, prob_hightest))
    X_train_target_p1_label["评级意向_阈值"] = X_train_target_p1_label["prob"].apply(lambda x: class_label2(x, prob_lowest, prob_hightest))
    X_test_target_p1_label["评级意向_阈值"] = X_test_target_p1_label["prob"].apply(lambda x: class_label2(x, prob_lowest, prob_hightest))

    result01_validate0002 = X_validate_data_label

    valed_validate_per = validate(result01_validate0002, cross="评级意向_占比")

    valed_train_per = validate(X_train_target_p1_label, cross="评级意向_阈值")
    valed_test_per = validate(X_test_target_p1_label, cross="评级意向_阈值")

    # Bookkeeping sheet: which data / features / pickled artifacts made this run.
    summary_info = pd.DataFrame(columns=['data', 'feature', 'pkl'])
    # NOTE(review): iloc-assignment into an empty frame is a no-op here -
    # the rows are actually created by the .loc assignments below.
    summary_info.iloc[0:4, 0:4] = np.nan
    summary_info.loc[0, 'data'] = config["Train_data"].split("/")[-1:]
    summary_info.loc[0, 'feature'] = feature
    summary_info.loc[0, 'pkl'] = version_info["model"]
    summary_info.loc[1, 'pkl'] = version_info["str2int"]
    summary_info.loc[2, 'pkl'] = version_info["ohe"]




    ############################################### Result output
    writer = pd.ExcelWriter(version_info["result"])

    summary_info.to_excel(writer, 'summary_info', encoding='utf_8_sig', index=False) # info used to identify the model

    train_lift.to_excel(writer, 'train_lift', encoding='utf_8_sig')
    test_lift.to_excel(writer, 'test_lift', encoding='utf_8_sig')
    train_evaluate.to_excel(writer, 'train_evaluate', encoding='utf_8_sig', index=False)
    test_evaluate.to_excel(writer, 'test_evaluate', encoding='utf_8_sig', index=False)
    train_lift_percentile.to_excel(writer, 'train_evaluate_percentile', encoding='utf_8_sig', index=False)
    test_lift_percentile.to_excel(writer, 'test_evaluate_percentile', encoding='utf_8_sig', index=False)

    prob_info.to_excel(writer, 'threshold', encoding='utf_8_sig', index=False)

    valed_train_per.to_excel(writer, 'valed_train_per', encoding='utf_8_sig')
    valed_test_per.to_excel(writer , 'valed_test_per' , encoding='utf_8_sig')
    valed_validate_per.to_excel(writer, 'valed_validate_per', encoding='utf_8_sig')

    writer.save()

    # valed_test_per; valed_validate_per; valed_train_per
    return valed_train_per.loc['高', '验证提升'], \
           valed_train_per.loc['高', '验证召回'], \
           valed_train_per.loc['高', '线索占比'], \
           valed_train_per.loc['低', '验证提升'], \
           valed_train_per.loc['低', '验证召回'], \
           valed_train_per.loc['低', '线索占比'], \
           valed_test_per.loc['高', '验证提升'], \
           valed_test_per.loc['高', '验证召回'], \
           valed_test_per.loc['高', '线索占比'], \
           valed_test_per.loc['低', '验证提升'], \
           valed_test_per.loc['低', '验证召回'], \
           valed_test_per.loc['低', '线索占比'], \
           valed_validate_per.loc['高', '验证提升'], \
           valed_validate_per.loc['高', '验证召回'], \
           valed_validate_per.loc['高', '线索占比'], \
           valed_validate_per.loc['低', '验证提升'], \
           valed_validate_per.loc['低', '验证召回'], \
           valed_validate_per.loc['低', '线索占比']





