# encoding: utf-8

# author: hufei_neo
# blog: https://blog.csdn.net/hufei_neo
# email: hufei_neo@163.com
# file: train_12.py
# time: 2022-01-04 15:00


import time

import pandas as pd
from pyspark import SparkFiles
from pyspark.sql import Row
from pyspark.sql import SparkSession

# Create (or reuse) the Spark session. The original script referenced
# `spark` without defining it, which only works inside pyspark-shell /
# notebook environments; getOrCreate() returns the existing session
# there and builds one when run as a standalone script. Hive support is
# required because the queries read Hive tables (idealsh.*).
spark = SparkSession.builder.enableHiveSupport().getOrCreate()

# Training month 202108: up to 1M rows per class of offer_flag_5g3 so
# the training sample is roughly class-balanced.
df1 = spark.sql("select * from idealsh.add_all_month_202108 where offer_flag_5g3 =0 limit 1000000")
df2 = spark.sql("select * from idealsh.add_all_month_202108 where offer_flag_5g3 =1 limit 1000000")

pdf1 = df1.toPandas()
pdf2 = df2.toPandas()
pdf3 = pd.concat([pdf1, pdf2])
pdf3['month_id'] = '202108'
# print(pdf3.head(10))
print(pdf3.shape)

# Test month 202109: deliberately imbalanced sample (200k negatives /
# 40k positives) used as the out-of-time evaluation set.
df4 = spark.sql("select * from idealsh.add_all_month_202109 where offer_flag_5g3 =0 limit 200000")
df5 = spark.sql("select * from idealsh.add_all_month_202109 where offer_flag_5g3 =1 limit 40000")

pdf4 = df4.toPandas()
pdf5 = df5.toPandas()
pdf6 = pd.concat([pdf4, pdf5])
pdf6['month_id'] = '202109'
# print(pdf3.head(10))
print(pdf6.shape)

# Stack both months into a single frame; all downstream code reads pdf3
# and splits train/test on the month_id column.
pdf3 = pd.concat([pdf3, pdf6])
print(pdf3.shape)

# Columns that arrive from Hive as strings but hold numeric values;
# each one is coerced to float (unparseable -> 0) before modelling.
strtoint = [
    'comp_serv_flag', 'present_1x_duration', 'net_flow', 'yy_init_val',
    'yy_accu_val', 'll_init_val', 'll_use_val', 'll_accu_val',
    'mon_flux_rate', 'amount_3avg', 'online_len', 'flux_1x',
    'active_day_num', 'rate_duration', 'offs_amount', 'terminal_flag_5g',
    'terminal_price', 'fuka_num', 'arpu_average_3', 'fair_amount_sum',
    'max_fk_num', 'free_fk_limit', 'coefficient', 'if_only_ty',
    'is_three_null_user',
]


def convert_dtype(j):
    """Coerce a raw cell value to float, falling back to 0.

    Parameters
    ----------
    j : Any
        Raw value, typically a string coming from the Hive export.

    Returns
    -------
    float or int
        ``float(j)`` when the value parses, otherwise ``0``.
    """
    try:
        return float(j)
    except (TypeError, ValueError):
        # None / empty / non-numeric strings default to 0.
        # (Narrowed from BaseException, which also swallowed
        # KeyboardInterrupt and SystemExit.)
        return 0


# Coerce every numeric-like string column to float in place, then patch
# any remaining NaNs (e.g. values that parsed to float('nan')) with the
# column median.
for col in strtoint:
    pdf3[col] = pdf3[col].apply(convert_dtype)
    print('********', col, '----')
    pdf3[col] = pdf3[col].fillna(value=pdf3[col].median())
    # print(col)

print('done')


def convert_day(day):
    """Return the number of days from *day* until 2021-12-01 00:00:00.

    Parameters
    ----------
    day : str
        Timestamp formatted as ``'%Y-%m-%d %H:%M:%S'``.

    Returns
    -------
    float or int
        Day count (possibly fractional or negative); ``0`` when *day*
        cannot be parsed.

    NOTE(review): time.mktime interprets the struct_time in the local
    timezone, so results can shift by 1/24 day across a DST boundary —
    confirm whether that precision matters for this feature.
    """
    try:
        start = time.strptime(day, '%Y-%m-%d %H:%M:%S')
        # Fixed reference point used for all "days since" features.
        end = time.strptime("2021-12-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        return (int(time.mktime(end)) - int(time.mktime(start))) / (24 * 60 * 60)
    except (TypeError, ValueError):
        # Missing / malformed dates fall back to 0 days.
        # (Narrowed from BaseException.)
        return 0


# Turn the raw timestamp columns into "days before 2021-12-01" features.
for raw_col, day_col in (('bill_date', 'bill_date_new'),
                         ('lte_sim_change_date', 'lte_sim_change_date_new')):
    pdf3[day_col] = pdf3[raw_col].astype(str).apply(convert_day)
print('done')



# Categorical string columns: fill missing values with '' and normalise
# the handset brand names.
strcolumns = ['business_id',
              'lte_card_type',
              'channel_type',
              'net_flag',
              'network_standard',
              'brand_name']

pdf3[strcolumns] = pdf3[strcolumns].fillna('')

# Lower-case brands, then collapse duplicate spellings of the same
# brand onto one canonical value.
pdf3['brand_name'] = pdf3['brand_name'].apply(lambda name: name.lower())
pdf3['brand_name'] = pdf3['brand_name'].replace(
    {'apple': '苹果', '维沃': 'vivo', '欧珀': 'oppo'})

import joblib
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import heapq
from sklearn import preprocessing
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkFiles
import numpy as np
import pandas as pd

# Label-encode the string columns in place.
# Fixes two defects in the original: (1) LabelEncoder.fit expects a 1-D
# array, but pdf3[[i]] is a 2-D DataFrame; (2) wrapping the transform
# result in a fresh pd.DataFrame gave it a RangeIndex that pandas would
# re-align against pdf3's duplicated index (pdf3 was concatenated
# without ignore_index), scrambling the assignment. fit_transform on
# the Series returns a plain ndarray, which is assigned positionally.
for i in strcolumns:
    le = preprocessing.LabelEncoder()
    pdf3[i] = le.fit_transform(pdf3[i])
    file_pre = 'strcolumns_process_{}_1214_newmonth8_4to5_v2.model'.format(i)
    # joblib.dump(filename=file_pre, value=le)
    print(i)
print(pdf3[strcolumns].head(10))

print('done')

# Full model feature list: the coerced numeric columns, the
# label-encoded string columns, and the two derived day-count features.
featuercolumns = [
    'comp_serv_flag', 'present_1x_duration', 'net_flow', 'yy_init_val',
    'yy_accu_val', 'll_init_val', 'll_use_val', 'll_accu_val',
    'mon_flux_rate', 'amount_3avg', 'online_len', 'flux_1x',
    'active_day_num', 'rate_duration', 'offs_amount', 'terminal_flag_5g',
    'terminal_price', 'fuka_num', 'arpu_average_3', 'fair_amount_sum',
    'max_fk_num', 'free_fk_limit', 'coefficient', 'if_only_ty',
    'is_three_null_user', 'business_id', 'lte_card_type', 'channel_type',
    'net_flag', 'network_standard', 'brand_name', 'bill_date_new',
    'lte_sim_change_date_new',
]

# Out-of-time split: August 2021 rows train the model, September 2021
# rows evaluate it.
train_mask = pdf3.month_id == '202108'
X_train = pdf3.loc[train_mask, featuercolumns]
y_train = pdf3.loc[train_mask, 'offer_flag_5g3']
print(X_train.shape, y_train.shape)

test_mask = pdf3.month_id == '202109'
X_test = pdf3.loc[test_mask, featuercolumns]
y_test = pdf3.loc[test_mask, 'offer_flag_5g3']
print(X_test.shape, y_test.shape)


import joblib
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import heapq
from sklearn import preprocessing
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkFiles
import numpy as np
import pandas as pd

# Grid search over min_samples_split for the random forest. For each
# candidate, print the top-10 features and evaluate the out-of-time
# September test set at several probability cutoffs.
# NOTE: the original reused `i` for both the outer grid loop and the
# inner feature loop (shadowing); distinct names are used here.
for split_candidate in range(100, 500, 50):
    # prior settings tried: n_estimators=100, max_depth=15, n_jobs=40,
    # min_samples_split=5, oob_score=True, class_weight={0:30, 1:1}
    cvf = RandomForestClassifier(n_estimators=45, max_depth=6, n_jobs=40,
                                 min_samples_split=split_candidate,
                                 min_samples_leaf=40, max_features=30,
                                 oob_score=False)
    cvf.fit(X_train, y_train)

    # Save the trained model to disk if needed (defaults to cwd):
    # joblib.dump(filename='RF_month7_dkzc_lab7.model', value=cvf)

    # Top-10 most important features for this candidate.
    importances = cvf.feature_importances_
    top10 = heapq.nlargest(10, range(len(importances)), importances.take)
    feature_names = X_train.columns
    for idx in top10:
        print(idx, feature_names[idx], importances[idx] * 100)

    pred_gl = cvf.predict_proba(X_test)
    df_ = pd.DataFrame(pred_gl, columns=['resu_0', 'resu_1'])

    # Binarize the positive-class probability at several thresholds.
    for tag, threshold in (('if_2', 0.2), ('if_3', 0.3),
                           ('if_4', 0.4), ('if_5', 0.5)):
        df_[tag] = df_['resu_1'].apply(lambda p, t=threshold: 1 if p >= t else 0)

    # Reset both indexes so the side-by-side concat aligns positionally.
    df_ = df_.reset_index(drop=True)
    y_test = y_test.reset_index(drop=True)
    df_all = pd.concat([y_test, df_], axis=1)
    # Class balance of the true label among rows flagged at each cutoff.
    print('gailv大于0.2用户的预测情况', df_all[df_all['if_2'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.3用户的预测情况', df_all[df_all['if_3'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.4用户的预测情况', df_all[df_all['if_4'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.5用户的预测情况', df_all[df_all['if_5'] == 1]['offer_flag_5g3'].value_counts())

# Final random forest, refit with the chosen split setting
# (min_samples_split=150) from the grid search above.
cvf = RandomForestClassifier(n_estimators=45, max_depth=6, n_jobs=40,
                             min_samples_split=150, min_samples_leaf=40,
                             max_features=30, oob_score=False)
cvf.fit(X_train, y_train)

# Print the 30 most important features and remember their indices.
importances = cvf.feature_importances_
top30 = heapq.nlargest(30, range(len(importances)), importances.take)
names = X_train.columns
que = []
for rank_idx in top30:
    print(rank_idx, names[rank_idx], importances[rank_idx] * 100)
    que.append(rank_idx)

y_rf = cvf.predict(X_test)

import heapq
import pandas as pd
import lightgbm

# Scan max_depth for a LightGBM classifier and evaluate each depth on
# the out-of-time test set at several probability cutoffs.
# FIX: the original called fit() twice back-to-back; the second plain
# fit() retrained the model WITHOUT categorical_feature, silently
# discarding the categorical handling. Only the categorical-aware fit
# is kept. The inner feature loop also no longer shadows the outer
# loop variable `i`.
for depth in range(3, 12, 3):
    cvf = lightgbm.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
                  importance_type='split', learning_rate=0.05, max_depth=depth,
                  min_child_samples=90, min_child_weight=0.001, min_split_gain=0.0,
                  n_estimators=100, n_jobs=30, num_leaves=8, objective=None,
                  random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
                  subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
    # Tell LightGBM which columns are label-encoded categoricals.
    cvf.fit(X_train, y_train, categorical_feature=strcolumns)

    # Save the trained model to disk if needed (defaults to cwd):
    # joblib.dump(filename='RF_month7_dkzc_lab7.model', value=cvf)

    # Top-10 most important features at this depth.
    importances = cvf.feature_importances_
    top10 = heapq.nlargest(10, range(len(importances)), importances.take)
    feature_names = X_train.columns
    for idx in top10:
        print(idx, feature_names[idx], importances[idx] * 100)

    pred_gl = cvf.predict_proba(X_test)
    df_ = pd.DataFrame(pred_gl, columns=['resu_0', 'resu_1'])

    # Binarize the positive-class probability at several thresholds.
    for tag, threshold in (('if_4', 0.4), ('if_5', 0.5),
                           ('if_6', 0.6), ('if_7', 0.7)):
        df_[tag] = df_['resu_1'].apply(lambda p, t=threshold: 1 if p >= t else 0)

    # Reset both indexes so the side-by-side concat aligns positionally.
    df_ = df_.reset_index(drop=True)
    y_test = y_test.reset_index(drop=True)
    df_all = pd.concat([y_test, df_], axis=1)
    # Class balance of the true label among rows flagged at each cutoff.
    print('gailv大于0.4用户的预测情况', df_all[df_all['if_4'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.5用户的预测情况', df_all[df_all['if_5'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.6用户的预测情况', df_all[df_all['if_6'] == 1]['offer_flag_5g3'].value_counts())
    print('gailv大于0.7用户的预测情况', df_all[df_all['if_7'] == 1]['offer_flag_5g3'].value_counts())

