# # from collections import Counter
# #
# # import pandas as pd
# # import numpy as np
# # from imblearn.over_sampling import SMOTE
# # from sklearn.linear_model import LogisticRegression
# # from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
# # from sklearn.model_selection import train_test_split, GridSearchCV
# # import joblib
# # from sklearn.preprocessing import StandardScaler
# #
# # from utils.Feature_selection import feature_engineering
# # param_grid = {
# #     'penalty': ['l1', 'l2', 'elasticnet', None],
# #     'C': [0.001, 0.01, 0.1, 1, 10, 100],
# #     'solver': ['lbfgs', 'liblinear', 'sag', 'saga'],
# #     'class_weight': [None, 'balanced'],
# #     'l1_ratio': [0.1, 0.5, 0.9]  # 仅在 penalty='elasticnet' 时有效
# # }
# #
# #
# # def train_model():
# #     x, y ,x_list=feature_engineering('../new_project/data/train.csv')
# #     x=pd.get_dummies(x)
# #     smote = SMOTE(random_state=42)
# #     X_res, y_res = smote.fit_resample(x, y)
# #
# #     x_train,x_test,y_train,y_test=train_test_split(X_res,y_res,test_size=0.4,random_state=64297,stratify=y)
# #
# #     transfer=StandardScaler()
# #     x_train=transfer.fit_transform(x_train)
# #     x_test=transfer.transform(x_test)
# #
# #     es=LogisticRegression()
# #     gv_es=GridSearchCV(estimator=es,param_grid=param_grid,cv=5)
# #
# #     gv_es.fit(x_train,y_train)
# #     best_model = gv_es.best_estimator_
# #     y_pre=best_model.predict(x_test)
# #     y_score =best_model.predict_proba(x_test)[:, 1]
# #     print("精确率:", precision_score(y_test, y_pre))  # 修正参数顺序
# #     print("ROC AUC:", roc_auc_score(y_test, y_score))
# #     print(y.value_counts())
# #     joblib.dump(gv_es,"../data/model_k_2025_30.pkl")
# #
# # if __name__ == '__main__':
# #     train_model()
#
#
# from collections import Counter
# import pandas as pd
# import numpy as np
# from imblearn.over_sampling import SMOTE
# from sklearn.linear_model import LogisticRegression
# from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score
# from sklearn.model_selection import train_test_split, GridSearchCV
# import joblib
# from sklearn.preprocessing import StandardScaler
#
# from utils.Feature_selection import feature_engineering
#
# # 修正参数网格：elasticnet必须配合solver='saga'
# param_grid = {
#     'penalty': ['l1', 'l2', 'elasticnet'],
#     'C': [0.001, 0.01, 0.1, 1, 10, 100],
#     'solver': ['liblinear', 'saga'],  # 移除了不兼容的solver
#     'class_weight': [None, 'balanced'],
#     'l1_ratio': [0.1, 0.5, 0.9]  # 仅在 penalty='elasticnet' 时有效
# }
#
#
# def train_model():
#     # 加载数据
#     x, y, x_list = feature_engineering('../new_project/data/train.csv')
#     x = pd.get_dummies(x)
#
#     # 先拆分再SMOTE（避免数据泄露）
#     x_train, x_test, y_train, y_test = train_test_split(
#         x, y, test_size=0.4, random_state=64297, stratify=y
#     )
#
#     # 仅对训练集过采样
#     smote = SMOTE(random_state=42)
#     x_train, y_train = smote.fit_resample(x_train, y_train)
#
#     # 标准化
#     transfer = StandardScaler()
#     x_train = transfer.fit_transform(x_train)
#     x_test = transfer.transform(x_test)
#
#     # 模型训练
#     es = LogisticRegression(max_iter=1000)  # 增加迭代次数
#     gv_es = GridSearchCV(
#         estimator=es,
#         param_grid=param_grid,
#         cv=5,
#         scoring='f1'  # 使用F1分数作为优化目标
#     )
#     gv_es.fit(x_train, y_train)
#
#     # 评估
#     best_model = gv_es.best_estimator_
#     y_pre = best_model.predict(x_test)
#     y_score = best_model.predict_proba(x_test)[:, 1]
#
#     print("数据分布（重采样后）:", Counter(y_train))  # 显示过采样后的分布
#     print("精确率:", precision_score(y_test, y_pre))
#     print("召回率:", recall_score(y_test, y_pre))
#     print("F1分数:", f1_score(y_test, y_pre))
#     print("ROC AUC:", roc_auc_score(y_test, y_score))
#
#     # 保存模型
#     joblib.dump(gv_es, "../data/model_k_2025_30.pkl")
#
#
#
# if __name__ == '__main__':
#     train_model()



import pandas as pd
import numpy as np
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, accuracy_score
from sklearn.model_selection import train_test_split, GridSearchCV
import joblib
from utils.Feature_selection import feature_engineering

# Hyperparameter search space for LogisticRegression.
#
# Split into compatible sub-grids because each solver only supports a
# subset of penalties ('l1' is rejected by lbfgs/sag, penalty=None is
# rejected by liblinear, and l1_ratio only applies to elasticnet+saga).
# GridSearchCV accepts a list of dicts and searches their union, so this
# avoids fitting — and failing on — invalid combinations while keeping
# the same name/usage for the consumer below.
param_grid = [
    {   # lbfgs and sag: l2 or no penalty only
        'penalty': ['l2', None],
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['lbfgs', 'sag'],
        'class_weight': [None, 'balanced'],
    },
    {   # liblinear: l1 or l2 (no penalty=None support)
        'penalty': ['l1', 'l2'],
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['liblinear'],
        'class_weight': [None, 'balanced'],
    },
    {   # saga: l1, l2 or no penalty
        'penalty': ['l1', 'l2', None],
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['saga'],
        'class_weight': [None, 'balanced'],
    },
    {   # saga + elasticnet is the only combination where l1_ratio applies
        'penalty': ['elasticnet'],
        'C': [0.001, 0.01, 0.1, 1, 10, 100],
        'solver': ['saga'],
        'class_weight': [None, 'balanced'],
        'l1_ratio': [0.1, 0.5, 0.9],
    },
]

# --- Train a logistic-regression classifier with a cross-validated grid search ---

# Project helper: returns the feature frame, the target and the selected
# feature names (exact schema not visible from this file).
x, y, x_list = feature_engineering('../new_project/data/train.csv')
# One-hot encode categorical columns so LogisticRegression receives numeric input.
x = pd.get_dummies(x)

# Stratified split keeps the class ratio identical in train and test.
X_train, X_test, y_train, y_test = train_test_split(
    x, y, test_size=0.4, random_state=64297, stratify=y
)

# max_iter raised from the default 100: lbfgs/sag/saga routinely fail to
# converge on un-scaled one-hot features and would otherwise emit
# ConvergenceWarning and return an under-fitted model.
es = LogisticRegression(max_iter=5000)

# error_score=np.nan makes the search skip (score as NaN) any
# solver/penalty combination a given sklearn version rejects, instead of
# aborting the whole grid search.
new_es = GridSearchCV(
    estimator=es,
    param_grid=param_grid,
    cv=5,
    n_jobs=-1,
    scoring='accuracy',
    error_score=np.nan,
)

new_es.fit(X_train, y_train)

# Hard predictions for threshold metrics; positive-class probability for ROC AUC.
y_pre = new_es.predict(X_test)
y_prob = new_es.predict_proba(X_test)[:, 1]

# Persist the whole fitted search object (includes the refit best estimator).
joblib.dump(new_es, '../data/245.pkl')

print("Precision:", precision_score(y_test, y_pre))
print("Recall:", recall_score(y_test, y_pre))
print("F1:", f1_score(y_test, y_pre))
print("ROC AUC:", roc_auc_score(y_test, y_prob))
print("最佳参数:", new_es.best_params_)
print("最佳得分:", new_es.best_score_)
print('准确率：', accuracy_score(y_test, y_pre))