# Trains and evaluates logistic-regression baselines on the feature table built by feature_preprocessing.py.
import numpy as np
import pandas as pd
from collections import Counter
from datetime import datetime
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
import gc
import warnings
from pylab import mpl
from sklearn.model_selection import *
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_curve, roc_auc_score

# Configure matplotlib so CJK axis labels and minus signs render correctly.
plt.rcParams.update({
    # Fallback chain of fonts with CJK glyph coverage (macOS / Windows names).
    'font.sans-serif': ['PingFang SC', 'SimHei', 'Songti SC'],
    # Use an ASCII '-' for minus so it is not dropped by the CJK fonts.
    'axes.unicode_minus': False,
})

# Silence noisy library warnings for cleaner console output.
warnings.filterwarnings('ignore')

# # 读取数据
# test_data = pd.read_csv(r'../../data/raw/data_format1/test_format1.csv')
# train_data = pd.read_csv(r'../../data/raw/data_format1/train_format1.csv')
# user_info = pd.read_csv(r'../../data/raw/data_format1/user_info_format1.csv')
# user_log = pd.read_csv(r'../../data/raw/data_format1/user_log_format1.csv')


# 用于减少内存使用
# def reduce_mem_usage(df, verbose=True):
#     start_mem = df.memory_usage().sum() / 1024 ** 2
#     numerics = ['int16', 'int32', 'int64', 'float16', 'float32', 'float64']
#
#     for col in df.columns:
#         col_type = df[col].dtypes
#         if col_type in numerics:
#             c_min = df[col].min()
#             c_max = df[col].max()
#             if str(col_type)[:3] == 'int':
#                 if c_min > np.iinfo(np.int8).min and c_max < np.iinfo(np.int8).max:
#                     df[col] = df[col].astype(np.int8)
#                 elif c_min > np.iinfo(np.int16).min and c_max < np.iinfo(np.int16).max:
#                     df[col] = df[col].astype(np.int16)
#                 elif c_min > np.iinfo(np.int32).min and c_max < np.iinfo(np.int32).max:
#                     df[col] = df[col].astype(np.int32)
#                 elif c_min > np.iinfo(np.int64).min and c_max < np.iinfo(np.int64).max:
#                     df[col] = df[col].astype(np.int64)
#             else:
#                 if c_min > np.finfo(np.float16).min and c_max < np.finfo(np.float16).max:
#                     df[col] = df[col].astype(np.float16)
#                 elif c_min > np.finfo(np.float32).min and c_max < np.finfo(np.float32).max:
#                     df[col] = df[col].astype(np.float32)
#                 else:
#                     df[col] = df[col].astype(np.float64)
#
#     end_mem = df.memory_usage().sum() / 1024 ** 2
#     print('Memory usage after optimization is: {:.2f} MB'.format(end_mem))
#     print('Decreased by {:.1f}%'.format(100 * (start_mem - end_mem) / start_mem))
#     print('===' * 30)
#     return df


#  内存优化
# train_data = reduce_mem_usage(train_data)
# test_data = reduce_mem_usage(test_data)
# user_info = reduce_mem_usage(user_info)
# user_log = reduce_mem_usage(user_log)


# def feature_engineering():
#     # print(train_data[train_data['label'] == 1])
#     # print(user_log[(user_log['user_id'] == 34176) & (user_log['seller_id'] == 3906)])
#     #  age_range,gender特征添加
#     df_train = pd.merge(train_data, user_info, on="user_id", how="left")
#     # print(df_train.head())
#     total_logs_temp = user_log.groupby([user_log["user_id"], user_log["seller_id"]]).count().reset_index()[
#         ["user_id", "seller_id", "item_id"]]
#     total_logs_temp.rename(columns={"seller_id": "merchant_id", "item_id": "total_logs"}, inplace=True)
#     df_train = pd.merge(df_train, total_logs_temp, on=["user_id", "merchant_id"], how="left")
#
#     unique_item_ids_temp = \
#         user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["item_id"]]).count().reset_index()[
#             ["user_id", "seller_id", "item_id"]]
#     unique_item_ids_temp1 = unique_item_ids_temp.groupby(
#         [unique_item_ids_temp["user_id"], unique_item_ids_temp["seller_id"]]).count().reset_index()
#     unique_item_ids_temp1.rename(columns={"seller_id": "merchant_id", "item_id": "unique_item_ids"}, inplace=True)
#     df_train = pd.merge(df_train, unique_item_ids_temp1, on=["user_id", "merchant_id"], how="left")
#
#     categories_temp = \
#         user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["cat_id"]]).count().reset_index()[
#             ["user_id", "seller_id", "cat_id"]]
#     categories_temp1 = categories_temp.groupby(
#         [categories_temp["user_id"], categories_temp["seller_id"]]).count().reset_index()
#     categories_temp1.rename(columns={"seller_id": "merchant_id", "cat_id": "categories"}, inplace=True)
#     df_train = pd.merge(df_train, categories_temp1, on=["user_id", "merchant_id"], how="left")
#
#     browse_days_temp = \
#         user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["time_stamp"]]).count().reset_index()[
#             ["user_id", "seller_id", "time_stamp"]]
#     browse_days_temp1 = browse_days_temp.groupby(
#         [browse_days_temp["user_id"], browse_days_temp["seller_id"]]).count().reset_index()
#     browse_days_temp1.rename(columns={"seller_id": "merchant_id", "time_stamp": "browse_days"}, inplace=True)
#     df_train = pd.merge(df_train, browse_days_temp1, on=["user_id", "merchant_id"], how="left")
#
#     one_clicks_temp = \
#         user_log.groupby([user_log["user_id"], user_log["seller_id"], user_log["action_type"]]).count().reset_index()[
#             ["user_id", "seller_id", "action_type", "item_id"]]
#     one_clicks_temp.rename(columns={"seller_id": "merchant_id", "item_id": "times"}, inplace=True)
#     one_clicks_temp["one_clicks"] = one_clicks_temp["action_type"] == 0
#     one_clicks_temp["one_clicks"] = one_clicks_temp["one_clicks"] * one_clicks_temp["times"]
#     one_clicks_temp["shopping_carts"] = one_clicks_temp["action_type"] == 1
#     one_clicks_temp["shopping_carts"] = one_clicks_temp["shopping_carts"] * one_clicks_temp["times"]
#     one_clicks_temp["purchase_times"] = one_clicks_temp["action_type"] == 2
#     one_clicks_temp["purchase_times"] = one_clicks_temp["purchase_times"] * one_clicks_temp["times"]
#     one_clicks_temp["favourite_times"] = one_clicks_temp["action_type"] == 3
#     one_clicks_temp["favourite_times"] = one_clicks_temp["favourite_times"] * one_clicks_temp["times"]
#     four_features = one_clicks_temp.groupby(
#         [one_clicks_temp["user_id"], one_clicks_temp["merchant_id"]]).sum().reset_index()
#     four_features = four_features.drop(["action_type", "times"], axis=1)
#     df_train = pd.merge(df_train, four_features, on=["user_id", "merchant_id"], how="left")
#     # print(df_train.info())
#     #   缺失值统计
#     print(df_train.isnull().sum(axis=0))
#     # age_range、gender缺失值填充
#     df_train['age_range'] = df_train['age_range'].fillna(-1).astype(int)
#     df_train['gender'] = df_train['gender'].fillna(2).astype(int)
#     #   缺失值统计
#     print(df_train.isnull().sum(axis=0))
#
#     return df_train


# Load the pre-built per-(user, merchant) feature table.
df_train = pd.read_csv('../../data/processed/train_v1.csv')

# Target vector and feature matrix; the identifier columns carry no signal.
Y = df_train['label']
X = df_train.drop(columns=['user_id', 'merchant_id', 'label'])

# Hold out 25% of rows for evaluation; the fixed seed keeps the split reproducible.
X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.25, random_state=10)
# Simple hold-out evaluation: fit logistic regression on the train split and
# inspect hard predictions / class probabilities on the test split.
Logit = LogisticRegression(solver='liblinear')
Logit.fit(X_train, y_train)
Predict = Logit.predict(X_test)
Predict_proba = Logit.predict_proba(X_test)
print(Predict[0:20])
print(Predict_proba[:])
# Hold-out accuracy — previously commented out even though accuracy_score
# is imported at the top of the file specifically for this check.
Score = accuracy_score(y_test, Predict)
print(f'准确率：{Score}')

# Cross-validated accuracy on the full data set.
LogRegAlg = LogisticRegression(random_state=1, solver='liblinear')
# NOTE: cross_val_score clones and fits the estimator per fold itself, so the
# former standalone `re = LogRegAlg.fit(X, Y)` was redundant work whose result
# was never used (and the name `re` shadowed the stdlib regex module).
scores = cross_val_score(LogRegAlg, X, Y, cv=3)
# Report the mean of the per-fold accuracies as the final score.
print("准确率为: ", scores.mean())

# Evaluate ranking quality on the hold-out split via ROC / AUC.
# predict_proba column 1 holds the estimated probability of the positive class.
pos_proba = Logit.predict_proba(X_test)[:, 1]

# Area under the ROC curve, plus the curve coordinates themselves.
auc = roc_auc_score(y_test, pos_proba)
roc_fpr, roc_tpr, roc_thresholds = roc_curve(y_test, pos_proba)
print(f'AUC: {auc:.4f}')

# Plot the ROC curve with the random-guess diagonal for reference.
plt.figure(figsize=(8, 6))
plt.plot(roc_fpr, roc_tpr, color='blue', label=f'ROC Curve (AUC = {auc:.4f})')
plt.plot([0, 1], [0, 1], 'k--')  # chance line
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve for Logistic Regression')
plt.legend(loc='lower right')
plt.grid(True)
plt.show()

