# 数据处理和分析
import pandas as pd
import numpy as np

# 可视化（如果需要使用）
import matplotlib.pyplot as plt
import seaborn as sns

# 统计模型
import statsmodels.api as sm
import statsmodels.formula.api as smf

# 机器学习和特征选择
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, cross_val_score, KFold, StratifiedKFold
from sklearn.pipeline import Pipeline
from sklearn.model_selection import GridSearchCV
from sklearn.linear_model import LogisticRegression, SGDClassifier
from sklearn.feature_selection import RFECV, SelectFromModel
from sklearn.ensemble import ExtraTreesClassifier, RandomForestClassifier

# 模型优化（如果你打算使用贝叶斯优化等高级优化技术）
# from bayes_opt import BayesianOptimization

# 其他可能需要用到的库
import xgboost as xgb
import lightgbm as lgb
# from attrdict import AttrDict  # 如果你在代码中用到了AttrDict，请确保导入它
import joblib  # 用于保存和加载模型

# 时间（如果需要）
import time

def get_lr_formula(model, X):
    """Tabulate a fitted (binary) logistic model's intercept and coefficients.

    Parameters
    ----------
    model : fitted estimator exposing ``intercept_`` and ``coef_``
        e.g. a fitted ``sklearn.linear_model.LogisticRegression``.
    X : pd.DataFrame
        The design matrix the model was fitted on; its column names label
        the coefficients.

    Returns
    -------
    pd.DataFrame
        Two columns, '参数' (term name, 'Intercept' first) and
        '估计值' (estimated value).
    """
    term_names = ['Intercept'] + list(X.columns)
    # ravel flattens intercept_ (shape (1,)) and coef_ (shape (1, n))
    # into one aligned value vector — same order as concat of the two.
    term_values = np.concatenate([np.ravel(model.intercept_),
                                  np.ravel(model.coef_)])
    return pd.DataFrame({'参数': term_names, '估计值': term_values})

def pick_variables(x, y, discover=True, method="rfecv", threshold=0.25, sls=0.05):
    """Select predictor columns of ``x`` for a binary-response logit model.

    Parameters
    ----------
    x : pd.DataFrame
        Candidate predictors.
    y : pd.Series
        Binary response; ``y.name`` is used in the statsmodels formulas,
        so it must be a valid formula identifier.
    discover : bool
        Unused; kept for backward compatibility with existing callers.
    method : str
        "rfecv"  - recursive feature elimination with cross-validation;
        "bs"     - backward stepwise elimination by p-value;
        "fs"     - forward stepwise selection by pseudo R-squared;
        "fs_bs"  - forward selection with a backward pruning pass;
        "rfc"    - top-15 features by random-forest importance.
    threshold : float
        Unused; kept for backward compatibility with existing callers.
    sls : float
        Significance level to stay: variables whose p-value exceeds it
        are dropped in the backward-elimination steps.

    Returns
    -------
    pd.DataFrame
        ``x`` restricted to the selected columns.
    """
    if method == "rfecv":  # RFE with cross-validated scoring
        estimator = LogisticRegression(penalty='l2', solver='liblinear')
        selector = RFECV(estimator, step=1, cv=5, scoring='accuracy')
        selector = selector.fit(x, y)
        x = x.loc[:, selector.support_]

    elif method == "bs" and x.shape[1] > 1:  # backward stepwise elimination
        data = pd.concat([x, y], axis=1)
        var_list = x.columns.tolist()
        response = y.name
        # Loop while candidates remain (guards against an empty formula).
        while var_list:
            formula = "{} ~ {} + 1".format(response, ' + '.join(var_list))
            mod = smf.logit(formula, data).fit(disp=0)
            # Drop the intercept before ranking: it is not a removable
            # candidate, and var_list.remove('Intercept') would raise.
            p_list = mod.pvalues.drop(labels='Intercept',
                                      errors='ignore').sort_values()
            # .iloc for positional access; Series[-1] is label-based and
            # no longer supported for integer positions in modern pandas.
            if p_list.iloc[-1] > sls:
                var_list.remove(p_list.index[-1])
            else:
                break
        x = x[var_list]

    elif method == "fs":  # forward stepwise, maximizing pseudo R-squared
        data = pd.concat([x, y], axis=1)
        response = y.name
        remaining = set(x.columns)
        selected = []
        current_score, best_new_score = 0.0, 0.0
        while remaining and current_score == best_new_score:
            scores_with_candidates = []
            for candidate in remaining:
                formula = "{} ~ {} + 1".format(
                    response, ' + '.join(selected + [candidate]))
                mod = smf.logit(formula, data).fit(disp=0)
                scores_with_candidates.append((mod.prsquared, candidate))
            scores_with_candidates.sort(reverse=True)
            best_new_score, best_candidate = scores_with_candidates.pop(0)
            if current_score < best_new_score:
                remaining.remove(best_candidate)
                selected.append(best_candidate)
                current_score = best_new_score
        x = x[selected]

    elif method == "fs_bs":  # forward selection + backward pruning pass
        data = pd.concat([x, y], axis=1)
        response = y.name
        remaining = set(x.columns)
        selected = []
        current_score, best_new_score = 0.0, 0.0
        while remaining and current_score == best_new_score:
            # Forward step: try each remaining candidate, keep the best.
            scores_with_candidates = []
            for candidate in remaining:
                formula = "{} ~ {} + 1".format(
                    response, ' + '.join(selected + [candidate]))
                mod = smf.logit(formula, data).fit(disp=0)
                scores_with_candidates.append((mod.prsquared, candidate))
            scores_with_candidates.sort(reverse=True)
            best_new_score, best_candidate = scores_with_candidates.pop(0)
            if current_score < best_new_score:
                remaining.remove(best_candidate)
                selected.append(best_candidate)
                current_score = best_new_score

            # Backward step: refit on the selected set and drop the least
            # significant variable if it exceeds sls. Skip when nothing is
            # selected yet (an empty RHS would be an invalid formula).
            if selected:
                formula2 = "{} ~ {} + 1".format(response, ' + '.join(selected))
                mod2 = smf.logit(formula2, data).fit(disp=0)
                # Exclude the intercept: selected.remove('Intercept')
                # would raise ValueError.
                p_list = mod2.pvalues.drop(labels='Intercept',
                                           errors='ignore').sort_values()
                if len(p_list) > 0 and p_list.iloc[-1] > sls:
                    selected.remove(p_list.index[-1])
                    if selected:
                        formula3 = "{} ~ {} + 1".format(
                            response, ' + '.join(selected))
                        mod3 = smf.logit(formula3, data).fit(disp=0)
                        best_new_score = mod3.prsquared
                        current_score = best_new_score
                    else:
                        # Everything pruned away: reset to the null score.
                        current_score = best_new_score = 0.0
        x = x[selected]

    elif method == "rfc":  # rank by random-forest importance, keep top 15
        RFC = RandomForestClassifier(n_estimators=200, max_depth=5,
                                     class_weight="balanced")
        RFC_Model = RFC.fit(x, y)
        featureImportance = {col: imp for col, imp in
                             zip(x.columns, RFC_Model.feature_importances_)}
        featureImportanceSorted = sorted(featureImportance.items(),
                                         key=lambda kv: kv[1], reverse=True)
        features_selection = [k[0] for k in featureImportanceSorted[:15]]
        # .copy() so the intercept column is added to an owned frame,
        # not a view of the caller's data (avoids SettingWithCopyWarning).
        x = x[features_selection].copy()
        x['intercept'] = 1
        # Refit with statsmodels purely to print the coefficient summary.
        LR = sm.Logit(y, x).fit(disp=0)
        print(LR.summary())
        x = x.drop("intercept", axis=1)
    return x

def model_optimizing(x, y, model="LR"):
    """Grid-search hyper-parameters for a logistic classifier on (x, y).

    Parameters
    ----------
    x : array-like / pd.DataFrame
        Feature matrix.
    y : array-like / pd.Series
        Binary target.
    model : str
        "LR" for LogisticRegression, "sgd" for SGDClassifier with
        logistic loss.

    Returns
    -------
    dict
        Full parameter dict of the best estimator found
        (``grid_search.best_estimator_.get_params()``).

    Raises
    ------
    ValueError
        If ``model`` is neither "LR" nor "sgd".
    """
    if model == "LR":
        pipeline = Pipeline([('lr', LogisticRegression(solver='liblinear',
                                                       class_weight="balanced"))])
        parameters = {
            'lr__penalty': ('l1', 'l2'),
            'lr__C': (0.01, 0.1, 1, 10),
            'lr__max_iter': (80, 150, 100),
        }
    elif model == "sgd":
        # loss='log' was removed in scikit-learn 1.3; 'log_loss' is the
        # logistic-regression loss.
        pipeline = Pipeline([
            ('sgd', SGDClassifier(loss='log_loss'))
        ])
        parameters = {
            'sgd__alpha': (0.00001, 0.000001, 0.0001),
            'sgd__penalty': ('l1', 'l2', 'elasticnet'),
            # 'n_iter' was removed from SGDClassifier (sklearn 0.21);
            # max_iter is the replacement, so GridSearchCV accepts it.
            'sgd__max_iter': (10, 50, 5),
        }
    else:
        # Fail fast instead of hitting a NameError on an undefined pipeline.
        raise ValueError("unknown model %r (expected 'LR' or 'sgd')" % (model,))

    grid_search = GridSearchCV(pipeline, parameters, n_jobs=6,
                               scoring='recall', cv=5)
    grid_search.fit(x, y)
    print('Best score: %0.3f' % grid_search.best_score_)
    print('Best parameters set:')
    best_parameters = grid_search.best_estimator_.get_params()
    for param_name in sorted(parameters.keys()):
        print('\t%s: %r' % (param_name, best_parameters[param_name]))
    print("Grid scores on development set:")
    means = grid_search.cv_results_['mean_test_score']
    stds = grid_search.cv_results_['std_test_score']
    for mean, std, params in zip(means, stds, grid_search.cv_results_['params']):
        print("%0.3f (+/-%0.03f) for %r"
              % (mean, std * 2, params))
    return best_parameters