# encoding: utf-8

# author: hufei_neo
# blog: https://blog.csdn.net/hufei_neo
# email: hufei_neo@163.com
# file: test.py
# time: 2021-12-30 15:33

import time

def convert_day(day):
    """Return the number of days from *day* up to 2021-12-01 00:00:00.

    Parameters
    ----------
    day : str
        Timestamp formatted as ``'%Y-%m-%d %H:%M:%S'``.

    Returns
    -------
    float
        Positive when *day* is before the reference date, negative when
        after; ``0`` when *day* cannot be parsed.
    """
    try:
        # Parse the input and the fixed reference date into epoch seconds.
        start = time.mktime(time.strptime(day, '%Y-%m-%d %H:%M:%S'))
        ref = time.mktime(time.strptime("2021-12-01 00:00:00", '%Y-%m-%d %H:%M:%S'))
        return (int(ref) - int(start)) / (24 * 60 * 60)
    except (TypeError, ValueError, OverflowError):
        # strptime raises TypeError/ValueError on bad input, mktime can raise
        # OverflowError/ValueError on out-of-range dates.  Catching only these
        # (instead of BaseException) no longer swallows KeyboardInterrupt etc.
        return 0


# Derive the day-offset features (days until 2021-12-01) from the raw
# date-string columns.
for src_col, dst_col in (('bill_date', 'bill_date_new'),
                         ('lte_sim_change_date', 'lte_sim_change_date_new')):
    pdf3[dst_col] = pdf3[src_col].astype(str).apply(convert_day)
print('done')


import time

def convert_day(day):
    """Days from *day* (format ``'%Y-%m-%d %H:%M:%S'``) to 2021-12-01 00:00:00.

    Returns a float day count (positive if *day* precedes the reference
    date), or ``0`` when *day* is not a parseable timestamp.
    """
    try:
        parsed = time.strptime(day, '%Y-%m-%d %H:%M:%S')
        reference = time.strptime("2021-12-01 00:00:00", '%Y-%m-%d %H:%M:%S')
        seconds_apart = int(time.mktime(reference)) - int(time.mktime(parsed))
        return seconds_apart / (24 * 60 * 60)
    except (TypeError, ValueError, OverflowError):
        # Only the parse/convert errors strptime and mktime actually raise;
        # the original BaseException also ate KeyboardInterrupt/SystemExit.
        return 0


# Map each raw date column onto its derived "days until 2021-12-01" feature.
date_feature_map = {
    'bill_date': 'bill_date_new',
    'lte_sim_change_date': 'lte_sim_change_date_new',
}
for raw_col, feature_col in date_feature_map.items():
    pdf3[feature_col] = pdf3[raw_col].astype(str).apply(convert_day)
print('done')

# Categorical string columns (later label-encoded before modelling).
strcolumns = [
    'business_id',
    'lte_card_type',
    'channel_type',
    'net_flag',
    'network_standard',
    'brand_name',
]

# Replace missing values with '' so every categorical cell is a string.
pdf3[strcolumns] = pdf3[strcolumns].fillna('')

# Normalize handset brand spellings: lower-case everything, then fold the
# known aliases onto their canonical brand names.
pdf3['brand_name'] = pdf3['brand_name'].apply(lambda name: name.lower())
for alias, canonical in (('apple', '苹果'), ('维沃', 'vivo'), ('欧珀', 'oppo')):
    pdf3.loc[pdf3['brand_name'] == alias, 'brand_name'] = canonical


import joblib
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import heapq
from sklearn import preprocessing
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkFiles
import numpy as np
import pandas as pd

# Label-encode each categorical string column and persist the fitted encoder
# so the identical mapping can be re-applied at scoring time.
for col in strcolumns:
    le = preprocessing.LabelEncoder()
    # BUGFIX: fit/transform on the 1-D Series pdf3[col]; the original passed
    # the (n, 1) DataFrame pdf3[[col]], which sklearn only tolerates with a
    # DataConversionWarning.
    le_fit = le.fit(pdf3[col])
    # BUGFIX: assign the encoded array directly.  Wrapping it in a fresh
    # pd.DataFrame re-indexes from 0 and silently produces NaNs whenever
    # pdf3 does not carry a default RangeIndex.
    pdf3[col] = le_fit.transform(pdf3[col])
    file_pre = 'strcolumns_process_{}_1214_newmonth8_4to5_v2.model'.format(col)
    joblib.dump(filename=file_pre, value=le_fit)
    print(col)
print(pdf3[strcolumns].head(10))

print('done')


# for i in strcolumns:
#     le = preprocessing.LabelEncoder()
#     file_pre = 'strcolumns_process_{}_1214_newmonth8_4to5_v2.model'.format(i)
#     le_fit = joblib.load(filename=file_pre)
#     fliter_h = le_fit.classes_.tolist()
#     pdf3 = pdf3[pdf3[i].isin(fliter_h)]
#     pdf3 = pdf3.reset_index(drop=True)
#     pdf3[i] = pd.DataFrame(le_fit.transform(pdf3[[i]]))
#     print(i)
# print('done')
# print(len(pdf3))

# Feature set fed to the models: numeric usage/value columns plus the
# label-encoded categorical columns and the derived date-offset features.
# (Name kept as-is -- "featuercolumns" -- since other code references it.)
featuercolumns = [
    'comp_serv_flag', 'present_1x_duration', 'net_flow', 'yy_init_val',
    'yy_accu_val', 'll_init_val', 'll_use_val', 'll_accu_val',
    'mon_flux_rate', 'amount_3avg', 'online_len', 'flux_1x',
    'active_day_num', 'rate_duration', 'offs_amount', 'terminal_flag_5g',
    'terminal_price', 'fuka_num', 'arpu_average_3', 'fair_amount_sum',
    'max_fk_num', 'free_fk_limit', 'coefficient', 'if_only_ty',
    'is_three_null_user', 'business_id', 'lte_card_type', 'channel_type',
    'net_flag', 'network_standard', 'brand_name', 'bill_date_new',
    'lte_sim_change_date_new',
]

# Evaluation data: feature matrix plus the 5G-offer label column.
X_test = pdf3[featuercolumns]
y_test = pdf3['offer_flag_5g3']
print('done')

# Depth sweep: for each candidate max_depth, train a random forest, print its
# top-10 feature importances, and summarise how the predicted 5G-offer
# probabilities stratify the test labels at several cutoffs.
# NOTE(review): X_train / y_train are never defined in this file as shown
# (the train_test_split call is commented out) -- confirm they are built
# elsewhere before this loop runs.
for i in list(range(1, 12, 2)):
    cvf = RandomForestClassifier(n_estimators=100, max_depth=i, n_jobs=40,
                                 min_samples_split=200, min_samples_leaf=40,
                                 max_features=40, oob_score=False)
    cvf.fit(X_train, y_train)

    # Optionally persist the fitted model:
    # joblib.dump(filename='RF_month7_dkzc_lab7.model', value=cvf)

    # Print the ten most important features for this depth.
    importances = cvf.feature_importances_
    top_idx = heapq.nlargest(10, range(len(importances)), importances.take)
    feature_names = X_train.columns
    # BUGFIX: the original inner loop reused ``i`` as its variable, clobbering
    # the outer max_depth index; use a dedicated name.  (Also dropped the
    # unused ``que = []``.)
    for rank in top_idx:
        print(rank, feature_names[rank], importances[rank] * 100)

    pred_gl = cvf.predict_proba(X_test)
    df_ = pd.DataFrame(pred_gl, columns=['resu_0', 'resu_1'])

    # Flag each row at several positive-class probability cutoffs.
    df_['if_2'] = df_['resu_1'].apply(lambda x: 1 if x >= 0.2 else 0)
    df_['if_3'] = df_['resu_1'].apply(lambda x: 1 if x >= 0.3 else 0)
    df_['if_4'] = df_['resu_1'].apply(lambda x: 1 if x >= 0.4 else 0)
    df_['if_5'] = df_['resu_1'].apply(lambda x: 1 if x >= 0.5 else 0)

    # Align scores with the ground-truth labels and report each bucket.
    df_ = df_.reset_index(drop=True)
    y_test = y_test.reset_index(drop=True)
    df_all = pd.concat([y_test, df_], axis=1)
    print('gailv大于0.2用户的预测情况', df_all[df_all['if_2'] == 1]['if_chaiji'].value_counts())
    print('gailv大于0.3用户的预测情况', df_all[df_all['if_3'] == 1]['if_chaiji'].value_counts())
    print('gailv大于0.4用户的预测情况', df_all[df_all['if_4'] == 1]['if_chaiji'].value_counts())
    print('gailv大于0.5用户的预测情况', df_all[df_all['if_5'] == 1]['if_chaiji'].value_counts())

    # NOTE(review): ``lightgbm`` is never imported in the visible source, so
    # this raises NameError as written -- confirm the import exists upstream.
    lgb_cf = lightgbm.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
                                     importance_type='split', learning_rate=0.2, max_depth=-1,
                                     min_child_samples=20, min_child_weight=0.001, min_split_gain=0.0,
                                     n_estimators=100, n_jobs=-1, num_leaves=31, objective=None,
                                     random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
                                     subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
    lgb_cf.fit(X_train, y_train, categorical_feature=strcolumns)

# NOTE(review): this block runs after the depth-sweep loop above, so ``i``
# here is whatever value leaked out of that loop -- confirm the intended
# max_depth rather than relying on the leaked loop variable.
# NOTE(review): ``lightgbm`` is never imported in the visible source; as
# written this raises NameError -- confirm the import exists upstream.
cvf = lightgbm.LGBMClassifier(boosting_type='gbdt', class_weight=None, colsample_bytree=1.0,
              importance_type='split', learning_rate=0.05, max_depth=i,
              min_child_samples=90, min_child_weight=0.001, min_split_gain=0.0,
              n_estimators=100, n_jobs=30, num_leaves=8, objective=None,
              random_state=None, reg_alpha=0.0, reg_lambda=0.0, silent=True,
              subsample=1.0, subsample_for_bin=200000, subsample_freq=0)
cvf.fit(X_train, y_train,categorical_feature=strcolumns)

# NOTE(review): refitting without ``categorical_feature`` discards the fit
# immediately above -- looks unintentional; confirm which fit is wanted.
cvf.fit(X_train, y_train)


