from sklearn.feature_selection import VarianceThreshold, mutual_info_regression, f_classif, mutual_info_classif
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_regression
from sklearn.feature_selection import chi2
from sklearn.feature_selection import SelectFromModel # model-based selection (regression models and tree models)
from sklearn.linear_model import Lasso
from sklearn.svm import LinearSVC
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier, RandomForestRegressor
from sklearn.linear_model import LogisticRegression
import numpy as np
import pandas as pd
import dbtool
import loaddata
import json
import dataupload
import errorcode
import datetime
from lib.dateutil import transform_to_days, which_is_datetime

"""
功能：自动化特征提取
"""
def min_max_sca(x):
    """
    Min-max scale a numeric pandas Series to integers in [0, 100).

    :param x: pd.Series of numeric values (``.map`` requires a Series,
        not a bare np.array)
    :return: pd.Series of ints
    """
    lowest = min(x)
    highest = max(x)
    # The tiny epsilon guards against division by zero for constant input
    scaled = (x - lowest) / (highest - lowest + 0.000000001)
    return (scaled * 100).map(int)

def remove_all_na(data):
    """
    Drop the rows and columns of a correlation matrix that are entirely NaN.

    Assumes a square matrix whose row labels mirror its column labels, so the
    same labels are dropped along both axes.

    :param data: correlation matrix as a pd.DataFrame
    :return: matrix without the all-NaN rows/columns
    """
    # Labels of columns consisting solely of NaN
    na_counts = data.isnull().sum()
    all_na_labels = list(data.columns[na_counts == data.shape[0]])

    # Remove those labels along both axes
    return data.drop(all_na_labels, axis=1).drop(all_na_labels, axis=0)

def remove_var_f(data_feature, threshold = 0): # default 0: only constant columns are dropped
    """
    Drop low-variance features: every column whose variance does not exceed
    ``threshold`` is removed.

    :param data_feature: feature matrix (array-like)
    :param threshold: variance cut-off
    :return: (matrix with the columns removed, indices of the kept columns)
    """
    selector = VarianceThreshold(threshold=threshold)
    reduced = selector.fit_transform(data_feature)
    kept = selector.get_support(indices=True)
    return reduced, kept

def remove_cor_f(data_feature, threshold): # drop variables via pairwise correlation
    """
    Remove one variable from each highly correlated pair.

    For every pair (i, j) with i < j whose Pearson correlation exceeds
    ``threshold``, the lower-indexed column i is dropped.
    NOTE(review): only positive correlations are tested (no abs()), matching
    the original behavior — confirm that is intended.

    :param data_feature: 2-D np.ndarray or pd.DataFrame
    :param threshold: correlation value above which a pair counts as "high"
    :return: (ndarray with columns removed, list of kept column indices)
    """
    if not isinstance(data_feature, pd.DataFrame):
        columns = ['X_' + str(i) for i in range(data_feature.shape[1])]
        data_feature = pd.DataFrame(data_feature, columns=columns)

    n_cols = data_feature.shape[1]
    # One vectorized pass instead of O(p^2) per-pair Series.corr calls
    cor_matrix = data_feature.corr().values

    # For each high-correlation pair, keep the higher-indexed member
    pool_remove = set()
    for i in range(n_cols):
        for j in range(i + 1, n_cols):
            # NaN correlations (e.g. constant columns) fail this test, as before
            if cor_matrix[i, j] > threshold:
                pool_remove.add(i)

    data_feature_last = np.delete(data_feature.values, list(pool_remove), axis=1)
    keep_list = [i for i in range(n_cols) if i not in pool_remove]
    return data_feature_last, keep_list

def select_k_best(data_feature, data_label, model_type, method, k):
    """
    Univariate feature selection: keep the k features with the highest
    statistic against the target variable.

    :param data_feature: feature matrix
    :param data_label: target variable
    :param model_type: 'regression' or 'classifier'
    :param method: scoring function; regression: 'f_regression',
        'mutual_info_regression'; classifier: 'chi2', 'f_classif',
        'mutual_info_classif'
    :param k: number of features to keep
    :return: (transformed feature matrix, indices of kept columns)
    :raises ValueError: for an unknown model_type/method combination
        (previously this surfaced as an opaque NameError on unbound `model`)
    """
    if model_type == 'regression':
        if method == 'f_regression': # correlation-based F-test
            model = SelectKBest(f_regression, k=k)
        elif method == 'mutual_info_regression':
            model = SelectKBest(mutual_info_regression, k=k)
        else:
            raise ValueError('unknown method for regression: {}'.format(method))
    elif model_type == 'classifier':
        if method == 'chi2': # chi-squared test
            model = SelectKBest(chi2, k=k)
        elif method == 'f_classif': # ANOVA F-test
            model = SelectKBest(f_classif, k=k)
        elif method == 'mutual_info_classif':
            model = SelectKBest(mutual_info_classif, k=k)
        else:
            raise ValueError('unknown method for classifier: {}'.format(method))
    else:
        raise ValueError('unknown model_type: {}'.format(model_type))

    # Fit, transform and report which columns survived
    data_feature_last = model.fit_transform(data_feature, data_label)
    keep_list = model.get_support(indices=True)

    return data_feature_last, keep_list

def select_from_model(data_feature, data_label, model_type, method = None):
    """
    Select features via a fitted model (L1 regularisation or tree ensembles).

    :param data_feature: feature matrix
    :param data_label: target variable
    :param model_type: 'regression' (Lasso) or 'classifier'
    :param method: classifier algorithm: 'svm-l1', 'logit-l1', 'extratree'
        or 'randomforest'; ignored for regression
    :return: (transformed feature matrix, indices of kept columns)
    :raises ValueError: for an unknown model_type or classifier method
        (previously this surfaced as an opaque NameError on unbound `clf`)
    """
    if model_type == 'regression':
        clf = Lasso().fit(data_feature, data_label)
    elif model_type == 'classifier':
        if method == 'svm-l1': # select via L1-sparsified coefficients
            clf = LinearSVC(C=0.01, penalty='l1', dual=False).fit(data_feature, data_label)
        elif method == 'logit-l1':
            clf = LogisticRegression(penalty='l1', solver='saga').fit(data_feature, data_label)
        elif method == 'extratree': # select via tree feature importances
            clf = ExtraTreesClassifier().fit(data_feature, data_label)
        elif method == 'randomforest':
            clf = RandomForestClassifier().fit(data_feature, data_label)
        else:
            raise ValueError('unknown method for classifier: {}'.format(method))
    else:
        raise ValueError('unknown model_type: {}'.format(model_type))

    # prefit=True: transform() may be called directly without re-fitting;
    # the selection threshold defaults to the mean importance/coefficient
    model = SelectFromModel(clf, prefit=True)

    data_feature_last = model.transform(data_feature)
    list_keep = model.get_support(indices=True)
    return data_feature_last, list_keep

def feature_importance(data_feature, data_label, model_type): # rank features by importance
    """
    Rank features by random-forest importance, scaled to integers in [0, 100).

    :param data_feature: pd.DataFrame of features
    :param data_label: pd.Series target variable
    :param model_type: '1001' (classification) or '1002' (regression)
    :return: JSON string mapping column name -> scaled importance, sorted
        from most to least important
    :raises ValueError: for an unknown model_type (previously an opaque
        NameError on unbound `rf_model`)
    """
    if model_type == '1001': # classification
        rf_model = RandomForestClassifier(random_state=0)
    elif model_type == '1002': # regression
        rf_model = RandomForestRegressor(random_state=0)
    else:
        raise ValueError('unknown model_type: {}'.format(model_type))

    rf_model.fit(data_feature, data_label)
    feature_imp = np.round(rf_model.feature_importances_, 2)
    importance_dt = pd.DataFrame({'colname': data_feature.columns, 'importance': feature_imp})

    # Sort by importance, most important first
    importance_dt = importance_dt.sort_values(by='importance', ascending=False)

    # Rescale so the largest value maps to (almost) 100
    importance_dt['importance'] = min_max_sca(importance_dt['importance'])

    # Build the dict with plain ints: numpy integer types are not JSON-serializable
    importance_dict = {name: int(score)
                      for name, score in zip(importance_dt['colname'], importance_dt['importance'])}

    return json.dumps(importance_dict, ensure_ascii=False)

def feature_correlation(data_feature): # pairwise correlations; all columns must be numeric
    """
    Compute the pairwise Pearson correlation matrix and format it for a
    heat-map front end.

    :param data_feature: pd.DataFrame with numeric columns only
    :return: JSON string with keys 'x_aixs' (sic — kept for front-end
        compatibility), 'y_axis' and 'value' ([x_index, y_index, corr] triples)
    """
    # Correlation matrix rounded to two decimals (.round replaces deprecated applymap)
    cor_matrix = data_feature.corr().round(2)

    # Drop rows/columns that are entirely NaN (e.g. from constant columns)
    cor_matrix = remove_all_na(cor_matrix)

    # Reverse the column order so the heat map renders in the expected orientation
    cor_matrix = cor_matrix.iloc[:, ::-1]

    cor_matrix_x_axis = cor_matrix.columns.tolist()
    cor_matrix_y_axis = cor_matrix.index.tolist()

    # One [x, y, value] triple per matrix cell
    cor_matrix_value = [[j, i, cor_matrix.iloc[i, j]]
                        for i in range(len(cor_matrix_y_axis))
                        for j in range(len(cor_matrix_x_axis))]

    result = dict()
    result['x_aixs'] = cor_matrix_x_axis  # NOTE(review): key typo kept — consumers expect it
    result['y_axis'] = cor_matrix_y_axis
    result['value'] = cor_matrix_value

    return json.dumps(result, ensure_ascii=False)


def feature_selection(data_feature, data_label, model_type):
    """
    Recommend a default feature subset via a three-stage filter.

    Stage 1 removes zero-variance features, stage 2 removes one member of
    each highly correlated pair, stage 3 keeps only the features a fitted
    model selects. If stage 3 discards everything, the stage-2 result is used.

    :param data_feature: pd.DataFrame of candidate features
    :param data_label: target variable
    :param model_type: '1001' (classification) or '1002' (regression)
    :return: JSON string {'recommend_feature': [column names]}
    :raises ValueError: for an unknown model_type (previously unbound-name
        NameErrors at the indexing step)
    """
    data_feature_name = data_feature.columns.values # all feature names

    # 1 drop zero-variance features
    data_feature_1, keep_list_var = remove_var_f(data_feature)

    # 2 drop highly correlated features
    data_feature_2, keep_list_cor = remove_cor_f(data_feature_1, threshold=0.9)

    # 3 model-based selection (may drop every remaining feature)
    if model_type == '1001': # classification
        data_feature_last, keep_list_model = select_from_model(
            data_feature_2, data_label, model_type='classifier', method='randomforest')
    elif model_type == '1002': # regression
        data_feature_last, keep_list_model = select_from_model(
            data_feature_2, data_label, model_type='regression')
    else:
        raise ValueError('unknown model_type: {}'.format(model_type))

    # Map the surviving indices back through each stage to original column names
    keep_list_last_name = data_feature_name[keep_list_var][keep_list_cor][keep_list_model]

    # 4 fall back to the stage-2 result if stage 3 kept nothing
    if data_feature_last.shape[1] == 0:
        keep_list_last_name = data_feature_name[keep_list_var][keep_list_cor]

    result = dict()
    result['recommend_feature'] = keep_list_last_name.tolist()
    return json.dumps(result, ensure_ascii=False)

# Estimate model training time
def get_model_time(model_name, sample_nums, feature_nums):
    """
    Rough training-time estimate from sample count (m) and feature count (n).

    :param model_name: key into the empirical per-model weight table
    :param sample_nums: number of samples
    :param feature_nums: number of features
    :return: estimated time as an int, always >= 1
    """
    # Empirical cost weights per model (per 10k samples, per feature)
    time_weight = {'逻辑回归':0.005,'随机森林':0.01,'lgb':0.016,
                   'xgboost':0.06,'svm':0.3,'神经网络':6, '线性回归':0.005,
                   'svr': 0.3,'gdbt':0.016}

    sample_units = sample_nums / 10000
    weight = time_weight[model_name]

    # SVM cost grows quadratically with the sample count; everything else linearly
    degree = 2 if model_name == 'svm' else 1
    estimated = weight * sample_units ** degree * feature_nums

    # +1 keeps tiny estimates from truncating to zero
    return int(estimated + 1)

def check_param_FE(param_info):
    """
    Validate that all required feature-engineering parameters are present.

    :param param_info: parameter dict
    :return: True when every required key exists, otherwise False
    """
    required_keys = ('taskid', 'projectid', 'modelType', 'targetName', 'variable')
    return all(key in param_info for key in required_keys)

def feature_engineering(param_info):
    """
    Run the full feature-engineering pipeline for one task.

    Fetches the data set (from OSS or a database), validates and preprocesses
    it, then computes (1) a feature-importance ranking, (2) pairwise feature
    correlations and (3) a default recommended feature subset, persisting all
    results to the `feature_engineering` table.

    :param param_info: dict of JSON parameters; must contain 'taskid',
        'projectid', 'modelType', 'targetName', 'variable', plus either
        'fileName'/'fileSize' (OSS source) or 'connId' with 'tableName' or
        'sql' (database source)
    :return: dict with 'ret' (0 = success, 1 = failure) and 'msg'
    """
    # Connect to the database
    dao = dbtool.Dao()
    response = dict()
    try:
        # Common parameters
        task_id = param_info['taskid']
        project_id = param_info['projectid']
        model_type = param_info['modelType']
        target_name = param_info['targetName']
        variable_name_list = param_info['variable'].split(',')

        # Source-specific parameters
        if param_info.get('fileName'): # data comes from OSS

            file_name = param_info['fileName']
            file_size = param_info['fileSize']
        else: # data comes from SQL
            if param_info.get('tableName'): # read a whole table by name
                table_name = param_info.get('tableName')
                sql = 'select * from {}'.format(table_name)
            else: # read via an explicit SQL statement
                sql = param_info['sql']
            conn_id = param_info['connId']
            conn_id = int(conn_id)

        # Convert ids to integers
        task_id = int(task_id)
        project_id = int(project_id)

        # Insert the task record and mark it running
        create_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')

        try:
            dao.insert_from_dict('feature_engineering',{'id': task_id, 'project_id':project_id, 'model_type':model_type, 'variable_before':param_info['variable'], 'status': 'running', 'create_time':create_time, 'target_name':target_name})
        except Exception as e: # if the insert fails, return immediately
            response['ret'] = 1
            response['msg'] = errorcode.feature_engineering_1
            return response

        try:
            # Decide how the data is fetched
            if param_info.get('fileName'): # from OSS
                # fetch the data
                data = dataupload.get_data_from_oss(file_name, file_size)

                # record the source parameters on the task row
                dao.update_from_dict('feature_engineering',
                                     {'file_name': file_name, 'file_size': file_size},
                                     limit_map={'id': task_id})

            else: # from the database
                # fetch the data
                data = dataupload.get_data_from_database(dao, conn_id = conn_id, sql_syntax = sql)

                # record the source parameters on the task row
                dao.update_from_dict('feature_engineering',
                                     {'conn_id':conn_id, 'sql_syntax':sql},
                                     limit_map={'id': task_id})
        except Exception as e:
            error_code_msg = '1002' + ':' + errorcode.feature_engineering_1002
            raise Exception(error_code_msg)

        # Convert datetime-typed columns to day counts
        data = transform_to_days(data)

        # Split into features and label
        data_feature = data[variable_name_list]
        data_label = data[target_name]

        # Estimated remaining run time
        m = data.shape[0] # number of rows
        n = data_feature.shape[1] # number of features
        #spend_time = get_model_time('随机森林', m, n)
        spend_time = round(m * 0.00144, 2) # coefficient determined empirically
        dao.update_from_dict('feature_engineering',
                             {'remain_time': spend_time},
                             limit_map={'id': task_id})

        # Data validation
        ld = loaddata.LoadData()
        try:
            ld.data_verify(data, data_feature, data_label, model_type)  # raises on invalid data; execution continues only when valid
        except Exception as e:
            error_code_msg = '1003' + ':' + errorcode.feature_engineering_1003
            raise Exception(error_code_msg)

        # Feature preprocessing (value mapping/encoding)
        data_feature, data_label, all_map = ld.data_manipulate(data_feature, data_label, model_type)

        # Feature extraction
        try:
            # 0 feature-importance ranking
            result_importance = feature_importance(data_feature, data_label, model_type)

            # 1 pairwise correlations between features
            result_correlation = feature_correlation(data_feature)

            # 2 default recommended feature subset
            result_recommend = feature_selection(data_feature, data_label, model_type)
        except Exception as e:
            error_code_msg = '1004' + ':' + errorcode.feature_engineering_1004
            raise Exception(error_code_msg)

        # Assemble the result payload
        result = dict()
        result['feature_importance'] = result_importance
        result['feature_correlation'] = result_correlation
        result['feature_selection'] = result_recommend


        # Persist the results and mark the task complete
        code_er = 200
        msg_er = '特征工程成功'
        error_information = json.dumps({'code': code_er, 'msg': msg_er}, ensure_ascii=False)
        dao.update_from_dict('feature_engineering', {'importance':result_importance, 'correlation':result_correlation, 'default_selection': result_recommend, 'status':'complete', 'error_information':error_information}, limit_map = {'id':task_id})

        # Success response
        response['ret'] = 0
        response['msg'] = errorcode.feature_engineering_0
    except Exception as e:
        # Record the failure; messages of the form '<code>:<msg>' are split apart
        try:
            code_er = int(str(e).split(':')[0])
            msg_er = str(e).split(':')[1]
        except Exception as e:
            code_er = 500
            msg_er = '特征工程失败'
        try:
            # NOTE(review): task_id may be unbound here if parameter parsing
            # failed above — the inner except silently swallows that case
            error_information = json.dumps({'code':code_er, 'msg':msg_er}, ensure_ascii=False)
            status = 'error'
            dao.update_from_dict('feature_engineering', {'error_information': error_information, 'status': status},limit_map={'id': task_id})
        except Exception as e:
            pass

        # Failure response
        response['ret'] = 1
        response['msg'] = errorcode.feature_engineering_1
    return response


if __name__ == '__main__':
    # Example: run the pipeline on a file stored in OSS
    type = 'oss'  # NOTE(review): unused, and shadows the builtin `type`
    params = {'taskid': '133', 'fileName':'HRLeft.csv', 'fileSize':'', 'targetName':'left', 'modelType':'1001', 'variable':'satisfaction_level,last_evaluation,number_project,average_montly_hours,time_spend_company,Work_accident,promotion_last_5years,sales,salary'}
    result = feature_engineering(param_info = params)

    # # Example: run the pipeline against a database connection
    # type = 'database'
    # params = {'taskid':1, 'connId':1, 'sqlId':'select * from iris', 'targetName':'Species', 'modelType':'1001', 'variable':'satisfaction_level,last_evaluation,number_project,average_montly_hours,time_spend_company,Work_accident,promotion_last_5years,sales,salary'}
    # result = feature_engineering(type=type, param_info=params)


    # """
    # 员工离职预测
    # """
    # def test():
    #     from sklearn.svm import SVC
    #     from sklearn.linear_model import LogisticRegression
    #     from sklearn.ensemble import RandomForestClassifier
    #     from lightgbm import  LGBMClassifier
    #
    #     data_hr = pd.read_csv(r'D:\BaiduNetdiskDownload\kaggle案例课程\员工离职预测\HR_comma_sep.csv')
    #
    #     #数据探索
    #     data_hr.dtypes
    #
    #     #变量转换
    #     import loaddata
    #     ld = loaddata.LoadData(1243522598709497858, 'iris.txt', '3867')
    #     data_hr['sales'],y_sales = ld.var_map(data_hr['sales'])
    #     data_hr['salary'], y_salary = ld.var_map(data_hr['salary'])
    #
    #     for i in ['svm', 'logit', 'randforest', 'lgb']:
    #
    #         data_hr_feature = data_hr.drop(columns='left', axis=1)
    #         data_hr_label = data_hr['left']
    #
    #         if i == 'svm':
    #             model_n = SVC()
    #             model_y = SVC()
    #         elif i == 'logit':
    #             model_n = LogisticRegression()
    #             model_y = LogisticRegression()
    #         elif i == 'randforest':
    #             model_n = RandomForestClassifier()
    #             model_y = RandomForestClassifier()
    #         elif i == 'lgb':
    #             model_n = LGBMClassifier()
    #             model_y = LGBMClassifier()
    #
    #         # 0 不筛选特征
    #         X_train, X_test, y_train, y_test = train_test_split(data_hr_feature, data_hr_label, train_size=0.75, random_state=123)
    #         #model_n = LogisticRegression()
    #         model_n.fit(X_train, y_train)
    #         y_pred = model_n.predict(X_test)
    #         score_n = accuracy_score(y_test, y_pred)
    #
    #         # 1 筛选特征
    #         data_hr_feature, data_hr_label, keep_col = feature_selection(data_hr_feature, data_hr_label, '1001')
    #         X_train, X_test, y_train, y_test = train_test_split(data_hr_feature, data_hr_label, train_size=0.75, random_state=123)
    #         #model_y = LogisticRegression()
    #         model_y.fit(X_train, y_train)
    #         y_pred = model_y.predict(X_test)
    #         score_y = accuracy_score(y_test, y_pred)
    #
    #         print(score_n, score_y)
    #
    # test()

    # data_feature = load_iris().data
    # data_label = load_iris().target
    # data_feature = pd.DataFrame(data_feature, columns=['X_' + str(i) for i in range(data_feature.shape[1])])
    # data_label = pd.Series(data_label)
    #
    # model_type = '1001'
    # feature_importance(data_feature, data_label, model_type, prob= '0.8') #按比例
    # feature_importance(data_feature, data_label, model_type, k='3') #按个数






