import pandas as pd
# Load competition data. user_log's seller_id is renamed to merchant_id so it
# joins cleanly with train/test, which use the merchant_id name.
train_df = pd.read_csv('train.csv')
test_df = pd.read_csv('test.csv')
user_info = pd.read_csv('./user_info_format1.csv')
# NOTE(review): filename 'user_log_format_1.csv' vs 'user_info_format1.csv' —
# inconsistent underscore; confirm which name the data files actually use.
user_log = pd.read_csv('./user_log_format_1.csv').rename(columns={'seller_id': 'merchant_id'})

# Fill missing values with the dataset's sentinel codes.
# gender: 0 = female, 1 = male, 2 and NULL both mean unknown -> collapse NULL into 2.
# Column assignment (not inplace on a slice) avoids pandas chained-assignment issues;
# the original also repeated these three fills twice — once is enough.
user_info['gender'] = user_info['gender'].fillna(2)
user_info['age_range'] = user_info['age_range'].fillna(-1)
user_log['brand_id'] = user_log['brand_id'].fillna(-1)

from matplotlib import pyplot as plt
# Quick EDA: distributions of action time, action type and user gender.
plt.figure(figsize=(12,4))
plt.subplot(1,3,1)
plt.hist(user_log['time_stamp']) # time_stamp: action date in mmdd integer format
plt.title('time_stamp')
plt.subplot(1,3,2)
plt.hist(user_log['action_type'])
plt.title('action_type') # action_type in {0,1,2,3}: 0=click, 1=add-to-cart, 2=purchase, 3=add-to-favourites
plt.subplot(1,3,3)
plt.hist(user_info['gender']) # gender: 0=female, 1=male, 2 and NULL=unknown
plt.title('gender')
plt.show()

from matplotlib import pyplot as plt
# Purchases per day over time (action_type == 2 is a purchase).
plt.plot(user_log[user_log['action_type']==2].groupby('time_stamp').count()['action_type'])
plt.show()

# Per-merchant counts of each action type, used as merchant-level features.
# BUG FIX: user_log's seller_id column was renamed to merchant_id on load, so
# we must group by 'merchant_id' (the original grouped by the no-longer-existing
# 'seller_id', which raises KeyError).
seller_group = user_log.groupby(["merchant_id", "action_type"]).count()[["user_id"]].reset_index().rename(
    columns={'user_id': 'count'})
import gc
# BUG FIX: do NOT `del user_log` here — it is still needed below for the
# 11.11 per-(user, merchant) features; the original delete caused a NameError.
gc.collect()

# Seed the merchant feature table with click counts (action_type == 0).
seller_feature = seller_group[seller_group['action_type'] == 0][['merchant_id', 'count']].reset_index(
    drop=True).rename(columns={'count': 'click_count'})

def _get_action_cnt(num, group=None, merchants=None):
    """Return the count of action type ``num`` for every merchant.

    Parameters
    ----------
    num : int
        Action type code (0=click, 1=cart, 2=purchase, 3=favourite).
    group : DataFrame, optional
        (merchant_id, action_type, count) table; defaults to module-level
        ``seller_group`` so existing one-argument call sites keep working.
    merchants : iterable, optional
        Merchant ids to report, in order; defaults to
        ``seller_feature['merchant_id']``. Merchants with no such action get 0.
    """
    if group is None:
        group = seller_group
    if merchants is None:
        merchants = seller_feature['merchant_id']
    sub = group[group['action_type'] == num]
    # One O(n) lookup table instead of the original O(n^2) per-merchant scans.
    counts = dict(zip(sub['merchant_id'], sub['count']))
    return [counts.get(m, 0) for m in merchants]

seller_feature['cart_count'] = _get_action_cnt(1)   # add-to-cart
seller_feature['sell_count'] = _get_action_cnt(2)   # purchase
seller_feature['star_count'] = _get_action_cnt(3)   # add-to-favourites
seller_feature.head()

# Attach merchant features to train/test. BUG FIX: after the rename there is no
# 'seller_id' column anywhere, so the original .drop(['seller_id'], axis=1)
# raised KeyError; the merge key alone is correct.
train_df = train_df.merge(seller_feature, on="merchant_id", how='left')
test_df = test_df.merge(seller_feature, on="merchant_id", how='left')
train_df.to_csv('train_v1.csv', index=False)
test_df.to_csv('test_v1.csv', index=False)

train_df = pd.read_csv('./train_v1.csv')
test_df = pd.read_csv('./test_v1.csv')

# Per-(user, merchant) counts of clicks(0)/carts(1)/favourites(3) BEFORE 11 Nov
# (time_stamp is an mmdd integer, so < 1111 means "before Singles' Day").
# NOTE(review): user_log is required here — the earlier `del user_log` in the
# original script must be removed or this loop raises NameError.
for i in (0, 1, 3):
    user_group = user_log[(user_log['time_stamp'] < 1111) & (user_log['action_type'] == i)].groupby(
        ["user_id", "merchant_id"]).count()[["action_type"]].reset_index()
    train_df = train_df.merge(user_group, on=['user_id', 'merchant_id'], how='left').rename(
        columns={'action_type': 'action_type{}'.format(i)})
    test_df = test_df.merge(user_group, on=['user_id', 'merchant_id'], how='left').rename(
        columns={'action_type': 'action_type{}'.format(i)})

# Same counts ON 11 Nov itself.
for i in (0, 1, 3):
    user_group = user_log[(user_log['time_stamp'] == 1111) & (user_log['action_type'] == i)].groupby(
        ["user_id", "merchant_id"]).count()[["action_type"]].reset_index()
    train_df = train_df.merge(user_group, on=['user_id', 'merchant_id'], how='left').rename(
        columns={'action_type': 'action_type{}_in1111'.format(i)})
    test_df = test_df.merge(user_group, on=['user_id', 'merchant_id'], how='left').rename(
        columns={'action_type': 'action_type{}_in1111'.format(i)})

# A missing merge result means "no such action for this pair" -> 0.
train_df = train_df.fillna(0)
test_df = test_df.fillna(0)

# Share of each action type that happened on 11.11; -1 marks pairs with no
# actions of that type at all (distinguishes "none" from "none on 11.11").
for i in (0, 1, 3):
    train_df['action{}_rate'.format(i)] = train_df.apply(
        lambda x: x['action_type{}_in1111'.format(i)] / x['action_type{}'.format(i)]
        if x['action_type{}'.format(i)] > 0 else -1, axis=1)
    test_df['action{}_rate'.format(i)] = test_df.apply(
        lambda x: x['action_type{}_in1111'.format(i)] / x['action_type{}'.format(i)]
        if x['action_type{}'.format(i)] > 0 else -1, axis=1)

# BUG FIX: write without the index, matching train_v1/test_v1 — otherwise an
# 'Unnamed: 0' column appears when these files are read back as features.
train_df.to_csv('./train_v2.csv', index=False)
test_df.to_csv('./test_v2.csv', index=False)

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
import lightgbm as lgb

# Features = all columns except the identifiers and the target label.
x = train_df.drop(['user_id','merchant_id','label'],axis=1)
y = train_df['label']

# Hold out 20% for validation; fixed seed for reproducibility.
x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=.2, random_state = 42)
from sklearn import metrics
from matplotlib import pyplot as plt

def plot_roc(y_test, y_score):
    """Draw a filled ROC curve and annotate it with its AUC."""
    fpr, tpr, _ = metrics.roc_curve(y_test, y_score)
    area = metrics.auc(fpr, tpr)
    # Shaded region under the curve, then the curve itself, then the diagonal.
    plt.stackplot(fpr, tpr, color='steelblue', alpha=0.5, edgecolor='black')
    plt.plot(fpr, tpr, color='black', lw=1)
    plt.plot([0, 1], [0, 1], color='red', linestyle='--')
    plt.text(0.5, 0.3, 'ROC curve (area = %0.2f)' % area)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC Curve')
    plt.show()

# Baseline: plain logistic regression (liblinear solver suits this data size).
lr = LogisticRegression(solver='liblinear')
lr.fit(x_train, y_train)
lr_pred = lr.predict(x_val)
lr_proba = lr.predict_proba(x_val)
print('模型的评估报告：\n',classification_report(y_val, lr_pred))
plot_roc(y_val, lr_proba[:,1])

# Same model with class_weight='balanced' to counter the label imbalance;
# compare this report/ROC against the unweighted baseline above.
lr = LogisticRegression(solver='liblinear',class_weight='balanced')
lr.fit(x_train, y_train)
lr_pred = lr.predict(x_val)
lr_proba = lr.predict_proba(x_val)
print('模型的评估报告：\n',classification_report(y_val, lr_pred))
plot_roc(y_val, lr_proba[:,1])

# LightGBM hyperparameters for the binary repeat-buyer classifier.
params = {
        'verbose':0,
        'min_data_in_leaf': 4,
        'objective': 'binary',
        'metric': 'auc',
        'num_leaves': 32,
        'n_estimators':2000,
        #'max_bin': 50,
        'max_depth':5,
        "learning_rate": 0.01,
        "colsample_bytree": 0.8,  # fraction of features randomly sampled per iteration
        "bagging_fraction": 0.8,  # fraction of data used per iteration (takes effect with bagging_freq)
        'n_jobs': -1,
        'silent': 1,  # set to 1 to suppress informational output; NOTE(review): removed in LightGBM 4.x — verify installed version
        'seed': 42,
        'bagging_freq':3,
        'is_unbalance':True,
        #'scale_pos_weight': wt
    }  # hyperparameters collected in one place

# Train LightGBM with early stopping on the validation AUC.
gbm = lgb.LGBMClassifier(**params)
# NOTE(review): the `early_stopping_rounds` fit kwarg was removed in
# LightGBM 4.x — on newer versions use callbacks=[lgb.early_stopping(30)].
gbm.fit(x_train, y_train,
        eval_metric='auc',
        eval_set=[(x_train, y_train), (x_val, y_val)],
        early_stopping_rounds=30)
gbm_pred = gbm.predict(x_val)
gbm_proba = gbm.predict_proba(x_val)
# Validation report + ROC. (The original printed and plotted this twice
# back-to-back by accident — once is enough.)
print('模型的评估报告：\n', classification_report(y_val, gbm_pred))
plot_roc(y_val, gbm_proba[:, 1])
import numpy as np
np.random.seed(42)  # reproducibility for numpy-backed initialization
#import tensorflow.compat.v1 as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, BatchNormalization
# NOTE(review): removed `from keras.utils import np_utils` — it was unused here
# and np_utils no longer exists in modern Keras, so the import itself crashes.
from tensorflow.keras.optimizers import RMSprop, Adam
from matplotlib import pyplot as plt

x = train_df.drop(['user_id', 'merchant_id', 'label'], axis=1)
y = train_df['label']

# BUG FIX: the original one-hot encoded y with to_categorical, producing shape
# (n, 2) — incompatible with the single-unit sigmoid output trained with
# binary_crossentropy below. Keep the labels 1-D.
import tensorflow as tf
from sklearn.metrics import roc_auc_score

def auc(y_true, y_pred):
    """Keras-compatible ROC-AUC metric computed via sklearn.

    Unused by the model below (which uses tf.keras.metrics.AUC), kept for
    reference. BUG FIX: tf.py_func was removed in TF2; tf.py_function is the
    eager-compatible replacement.
    """
    return tf.py_function(roc_auc_score, (y_true, y_pred), tf.double)

# Small MLP for binary classification of repeat buyers.
model = Sequential(name="MLP0")

# Fully connected hidden layers with batch norm and dropout.
model.add(Dense(input_dim=56, units=128,activation='relu',name='fc1'))  # assumes 56 feature columns — TODO confirm against x.shape[1]
model.add(BatchNormalization(name='bn1'))
model.add(Dropout(0.3,name='drpout1'))
model.add(Dense(256, activation='relu',name='fc2'))
model.add(BatchNormalization(name='bn2'))
model.add(Dropout(0.3,name='drpout2'))
# Final output layer: single sigmoid unit -> probability of the positive class.
model.add(Dense(1, activation='sigmoid',name='output'))

print(model.summary())
"""
把参数都写在最前面是一个好习惯
"""
# (The string above says: listing all hyperparameters up front is a good habit.)
epoch = 50 
batch_size = 128
learning_rate = 0.01
decay_rate = learning_rate / epoch  # linear LR decay spread over all epochs
#momentum = 0.8
adam_optimizer = Adam(learning_rate=0.01,decay=decay_rate) # custom optimizer; NOTE(review): the `decay` kwarg was removed in Keras 3 — confirm installed version
from tensorflow.keras.callbacks import EarlyStopping
# monitor: metric to watch; min_delta: changes below this count as no improvement;
# mode: 'min' for losses, 'max' for scores; patience: stop after n epochs without
# improvement; restore_best_weights: roll back to the best weights seen.
earlyStop = EarlyStopping(monitor='val_auc', min_delta=1e-6, patience=4, mode='max', verbose=1, restore_best_weights = True)
# Use validation_split for validation data, add early stopping, shuffle each epoch.
#model.fit(x_train, y_train, callbacks=[earlyStop],  epochs=10, batch_size=32, validation_split=0.1, verbose = 1,shuffle=True)

model.compile(optimizer = adam_optimizer,
             loss='binary_crossentropy',
              metrics=[tf.keras.metrics.AUC(curve='ROC', name = 'auc')]
             )

# class_weight 1:15 upweights the rare positive class — presumably hand-tuned
# for this dataset's imbalance; verify against the actual label ratio.
model.fit(x, y, epochs = epoch, callbacks=[earlyStop],class_weight={0:1,1:15},batch_size=batch_size, validation_split=0.1, verbose = 1,shuffle=True)

#loss, accuracy = model.evaluate(x_test, y_test)
#print('test loss:{}\t'.format(loss),'\ntest accuracy:{}'.format(accuracy))

from sklearn.model_selection import train_test_split
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report

# Rebuild features/labels and the 80/20 split (same seed as before, so the
# validation rows match the earlier models' validation set).
x = train_df.drop(['user_id','merchant_id','label'],axis=1)
y = train_df['label']

x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=.2, random_state = 42)

from sklearn import metrics
from matplotlib import pyplot as plt

def plot_roc(y_test, y_score):
    """Plot the ROC curve for predicted scores, shading the area under it."""
    rates = metrics.roc_curve(y_test, y_score)
    false_pos, true_pos = rates[0], rates[1]
    auc_value = metrics.auc(false_pos, true_pos)
    plt.stackplot(false_pos, true_pos, color='steelblue', alpha=0.5, edgecolor='black')
    plt.plot(false_pos, true_pos, color='black', lw=1)
    # Chance-level diagonal for reference.
    plt.plot([0, 1], [0, 1], color='red', linestyle='--')
    annotation = 'ROC curve (area = %0.2f)' % auc_value
    plt.text(0.5, 0.3, annotation)
    plt.xlabel('FPR')
    plt.ylabel('TPR')
    plt.title('ROC Curve')
    plt.show()
# ROC of the MLP on the validation split; predict() returns the sigmoid
# probabilities directly, so they feed straight into roc_curve.
mlp_proba = model.predict(x_val)

plot_roc(y_val, mlp_proba)

# Build the submission file from the trained LightGBM model.
prob = gbm.predict_proba(test_df.drop(['user_id', 'merchant_id'], axis=1))
test_df['prob'] = pd.Series(prob[:, 1])  # probability of the positive class
# BUG FIX: the original selected a single malformed column named
# 'user_id, merchant_id' (one string), which raises KeyError — select the
# three real columns, and write without the index so the file re-reads cleanly.
test_df[['user_id', 'merchant_id', 'prob']].to_csv('prediction.csv', index=False)
pred = pd.read_csv('prediction.csv')
pred.head()