#

#大数据-用户画像-10-用户复购预测 -迟迦宁-20250212


from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import lightgbm as lgb

# Feature matrix: every column except the identifiers and the target.
x = train_df.drop(columns=['user_id', 'merchant_id', 'label'])
# Binary repeat-buyer label.
y = train_df['label']

# Hold out 20% of the labelled data for validation.
x_train, x_val, y_train, y_val = train_test_split(
    x, y, test_size=0.2, random_state=42)

from sklearn import metrics
from matplotlib import pyplot as plt

def plot_roc(y_test, y_score):
    """Plot a ROC curve with the AUC area shaded and annotated.

    Parameters
    ----------
    y_test : array-like of 0/1 ground-truth labels.
    y_score : array-like of predicted probabilities for the positive class.
    """
    fpr, tpr, threshold = metrics.roc_curve(y_test, y_score)
    roc_auc = metrics.auc(fpr, tpr)
    # Shade the area under the curve, then draw the curve outline.
    plt.stackplot(fpr, tpr, color='steelblue', alpha=0.5, edgecolor='black')
    plt.plot(fpr, tpr, color='black', lw=1)
    # Diagonal reference line = random classifier.
    plt.plot([0, 1], [0, 1], color='red', linestyle='--')
    plt.text(0.5, 0.3, 'ROC curve (area = %0.2f)' % roc_auc)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC Curve')
    plt.show()


# Data is imbalanced, so recall of the plain model is poor.
# FIX: this block was accidentally indented inside plot_roc above, which
# (a) made plot_roc call itself -> infinite recursion on every call, and
# (b) meant the baseline logistic regression never ran at module level.
lr = LogisticRegression(solver='liblinear')
lr.fit(x_train, y_train)
lr_pred = lr.predict(x_val)
lr_proba = lr.predict_proba(x_val)
print('模型的评估报告：\n', classification_report(y_val, lr_pred))
plot_roc(y_val, lr_proba[:, 1])

# Refit the logistic regression with balanced class weights to counter the
# heavy label imbalance noted above.
lr = LogisticRegression(solver='liblinear', class_weight='balanced')
lr_pred = lr.fit(x_train, y_train).predict(x_val)
lr_proba = lr.predict_proba(x_val)
print('模型的评估报告：\n', classification_report(y_val, lr_pred))
plot_roc(y_val, lr_proba[:, 1])

# LightGBM hyper-parameters, expanded into LGBMClassifier(**params) below.
params = {
    # Objective / evaluation
    'objective': 'binary',
    'metric': 'auc',
    # Tree structure
    'num_leaves': 32,
    'max_depth': 5,
    'min_data_in_leaf': 4,
    # Boosting schedule
    'n_estimators': 2000,
    'learning_rate': 0.01,
    # 'max_bin': 50,
    # Row / column subsampling
    'colsample_bytree': 0.8,   # fraction of features sampled per tree
    'bagging_fraction': 0.8,   # fraction of rows sampled per iteration
    'bagging_freq': 3,         # re-bag every 3 iterations
    # Imbalance handling
    'is_unbalance': True,
    # 'scale_pos_weight': wt
    # Misc
    'n_jobs': -1,
    'seed': 42,
    'verbose': 0,
    'silent': 1,               # 1 suppresses training output
}

# Gradient-boosted trees; 'is_unbalance' in `params` handles the skewed labels.
gbm = lgb.LGBMClassifier(**params)
# NOTE(review): `early_stopping_rounds` as a fit() kwarg was removed in
# lightgbm>=4 — on newer versions pass callbacks=[lgb.early_stopping(30)].
gbm.fit(x_train, y_train,
        eval_metric='auc',
        eval_set=[(x_train, y_train), (x_val, y_val)],
        early_stopping_rounds=30)
gbm_pred = gbm.predict(x_val)
gbm_proba = gbm.predict_proba(x_val)
# FIX: the report/plot below was emitted twice verbatim; print it once.
print('模型的评估报告：\n', classification_report(y_val, gbm_pred))
plot_roc(y_val, gbm_proba[:, 1])


#mlp

import numpy as np
# Seed numpy for reproducible weight init / shuffling on the numpy side.
np.random.seed(42)
#import tensorflow.compat.v1 as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout,BatchNormalization
# NOTE(review): np_utils is never used below, and keras.utils.np_utils was
# removed in recent Keras releases — this import may fail; consider deleting.
from keras.utils import np_utils
from tensorflow.keras.optimizers import RMSprop,Adam
from matplotlib import pyplot as plt


# Rebuild the feature matrix / target for the MLP (same columns as before).
x = train_df.drop(columns=['user_id', 'merchant_id', 'label'])
y = train_df['label']

import tensorflow as tf
from sklearn.metrics import roc_auc_score

def auc(y_true, y_pred):
    """Keras-compatible ROC-AUC metric backed by sklearn's roc_auc_score.

    FIX: tf.py_func is TF1-only and was removed in TF2 (this file already
    uses tensorflow.keras imports); tf.py_function is the TF2 replacement
    with the same call shape.
    NOTE(review): currently unused — model.compile below tracks
    tf.keras.metrics.AUC instead.
    """
    return tf.py_function(roc_auc_score, (y_true, y_pred), tf.double)


from tensorflow.keras.optimizers import Adam

model = Sequential(name="MLP0")

# Fully connected stack: Dense -> BatchNorm -> Dropout, twice.
# FIX: derive the input width from the feature matrix instead of the
# hard-coded 56, so the net stays valid if the feature set changes.
model.add(Dense(input_dim=x.shape[1], units=128, activation='relu', name='fc1'))
model.add(BatchNormalization(name='bn1'))
model.add(Dropout(0.3, name='drpout1'))
model.add(Dense(256, activation='relu', name='fc2'))
model.add(BatchNormalization(name='bn2'))
model.add(Dropout(0.3, name='drpout2'))
# Single sigmoid unit: repeat-purchase probability.
model.add(Dense(1, activation='sigmoid', name='output'))

print(model.summary())
"""
把参数都写在最前面是一个好习惯
"""
epoch = 50
batch_size = 128
learning_rate = 0.01
decay_rate = learning_rate / epoch
#momentum = 0.8
adam_optimizer = Adam(learning_rate=0.01,decay=decay_rate) #自定义优化器
from tensorflow.keras.callbacks import EarlyStopping
#monitor:监视参数,min_delta:小于此数认为不变化，mode:loss小好，acc大好，patience:n周期无提升则退出，restore_best_weights:取最优权重
earlyStop = EarlyStopping(monitor='val_auc', min_delta=1e-6, patience=4, mode='max', verbose=1, restore_best_weights = True)
#增加validation_data参数作为验证集，添加早停止机制，训练时打乱序列顺序
#model.fit(x_train, y_train, callbacks=[earlyStop],  epochs=10, batch_size=32, validation_split=0.1, verbose = 1,shuffle=True)

# Binary cross-entropy, tracking ROC-AUC under the name 'auc' so the
# EarlyStopping callback above can monitor 'val_auc'.
model.compile(
    loss='binary_crossentropy',
    optimizer=adam_optimizer,
    metrics=[tf.keras.metrics.AUC(curve='ROC', name='auc')],
)

# Train on the full labelled set: 10% split off for validation, classes
# re-weighted ~1:15 against the imbalance, batches shuffled each epoch.
model.fit(
    x, y,
    epochs=epoch,
    batch_size=batch_size,
    validation_split=0.1,
    shuffle=True,
    verbose=1,
    callbacks=[earlyStop],
    class_weight={0:1,1:15},
)

#loss, accuracy = model.evaluate(x_test, y_test)
#print('test loss:{}\t'.format(loss),'\ntest accuracy:{}'.format(accuracy))


from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

# NOTE(review): duplicates the split at the top of the file — same columns
# and the same random_state=42, so the resulting split is identical.
x = train_df.drop(['user_id','merchant_id','label'],axis=1)
y = train_df['label']

x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=.2, random_state = 42)

from sklearn import metrics
from matplotlib import pyplot as plt

def plot_roc(y_test, y_score):
    """Draw a ROC curve with the AUC shaded and annotated.

    NOTE(review): identical redefinition of plot_roc from earlier in the
    file; kept so this section can run on its own.
    """
    false_pos, true_pos, _ = metrics.roc_curve(y_test, y_score)
    area = metrics.auc(false_pos, true_pos)
    # Filled area under the curve plus the curve outline.
    plt.stackplot(false_pos, true_pos, color='steelblue', alpha=0.5,
                  edgecolor='black')
    plt.plot(false_pos, true_pos, color='black', lw=1)
    # Chance diagonal for reference.
    plt.plot([0, 1], [0, 1], color='red', linestyle='--')
    plt.text(0.5, 0.3, 'ROC curve (area = %0.2f)' % area)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC Curve')
    plt.show()


# Predicted positive-class probabilities from the MLP on the validation fold.
# NOTE(review): the MLP was fit on all of (x, y), which includes x_val, so
# this ROC is optimistic — refit on x_train only for a fair comparison.
mlp_proba = model.predict(x_val)

# FIX: model.predict returns an (n, 1) column vector; flatten it so
# metrics.roc_curve receives the 1-d score array it expects.
plot_roc(y_val, mlp_proba.ravel())

# Final submission: predicted repeat-purchase probability per (user, merchant).
prob = gbm.predict_proba(test_df.drop(['user_id', 'merchant_id'], axis=1))
# FIX: assign the ndarray positionally rather than via pd.Series, so values
# cannot be misaligned (NaN-filled) when test_df has a non-default index.
test_df['prob'] = prob[:, 1]
# FIX: 'user_id, merchant_id' was one malformed string (KeyError at runtime);
# select the two id columns separately. index=False keeps the row index out
# of the submission CSV.
test_df[['user_id', 'merchant_id', 'prob']].to_csv('prediction.csv', index=False)


# Sanity check: reload the submission file that was just written.
pred = pd.read_csv('prediction.csv')
# NOTE: a bare .head() has no visible effect in a plain script (it only
# displays in a notebook); wrap it in print() when running as a .py file.
pred.head()