# encoding: utf-8

import pandas as pd
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkFiles

# Obtain the SparkSession explicitly (enableHiveSupport is required to read
# Hive tables such as idealsh.*). getOrCreate() reuses the shell's existing
# session when running inside pyspark, so interactive use is unaffected;
# the original relied on an implicit `spark` that is never defined in this file.
spark = SparkSession.builder.enableHiveSupport().getOrCreate()

df1 = spark.sql("select * from idealsh.add_all_paizhao_202111_1")

# NOTE(review): toPandas() collects the whole table onto the driver —
# confirm it fits in driver memory for production-sized runs.
pdf3 = df1.toPandas()
print(pdf3.head(10))
print(pdf3.shape)

# Numeric feature columns that arrive from Hive as strings and must be
# coerced to float (see convert_dtype below).
strtoint = [
    'comp_serv_flag', 'present_1x_duration', 'net_flow', 'yy_init_val',
    'yy_accu_val', 'll_init_val', 'll_use_val', 'll_accu_val',
    'mon_flux_rate', 'amount_3avg', 'online_len', 'flux_1x',
    'active_day_num', 'rate_duration', 'offs_amount', 'terminal_flag_5g',
    'terminal_price', 'fuka_num', 'arpu_average_3', 'fair_amount_sum',
    'max_fk_num', 'free_fk_limit', 'coefficient', 'if_only_ty',
    'is_three_null_user',
]


def convert_dtype(j):
    """Coerce a scalar to float, mapping unconvertible values to 0.

    Used with Series.apply to clean string-typed numeric columns.
    Returns float(j) for numbers and numeric strings; returns 0 for
    None or malformed input.
    """
    try:
        return float(j)
    # Catch only the errors float() actually raises; the original
    # `except BaseException` also swallowed KeyboardInterrupt/SystemExit.
    except (TypeError, ValueError):
        return 0


# Coerce every string-typed numeric column to float, then patch any NaN
# left over with that column's median.
for col in strtoint:
    pdf3[col] = pdf3[col].apply(convert_dtype)
    print('********', col, '----')
    pdf3[col] = pdf3[col].fillna(value=pdf3[col].median())

print('done')

import time


def convert_day(day, ref_day="2021-12-01 00:00:00"):
    """Return the number of days from *day* up to *ref_day*.

    Parameters
    ----------
    day : str
        Timestamp formatted ``%Y-%m-%d %H:%M:%S``.
    ref_day : str, optional
        Reference cutoff in the same format. Defaults to the original
        hard-coded value (2021-12-01) so existing callers are unchanged.

    Returns
    -------
    float
        ``(ref_day - day)`` in days (may be fractional or negative), or
        0 when *day* cannot be parsed.
    """
    try:
        start = time.strptime(day, '%Y-%m-%d %H:%M:%S')
        end = time.strptime(ref_day, '%Y-%m-%d %H:%M:%S')
        # mktime interprets both structs in the same local timezone, so
        # the offset cancels in the subtraction (DST shifts aside).
        return (int(time.mktime(end)) - int(time.mktime(start))) / (24 * 60 * 60)
    # Narrowed from BaseException: strptime raises TypeError/ValueError,
    # mktime can raise OverflowError/ValueError for out-of-range dates.
    except (TypeError, ValueError, OverflowError):
        return 0


# Derive day-count features from the two date columns; astype(str) turns
# NaT/None into strings that fail parsing inside convert_day and map to 0.
for src, dst in (('bill_date', 'bill_date_new'),
                 ('lte_sim_change_date', 'lte_sim_change_date_new')):
    pdf3[dst] = pdf3[src].astype(str).map(convert_day)
print('done')

# str字段处理
# Categorical (string) columns that go through label encoding further down.
strcolumns = [
    'business_id',
    'lte_card_type',
    'channel_type',
    'net_flag',
    # 'tianyi_offer_id',
    'network_standard',
    'brand_name',
]

# Missing categories become the empty string so the fitted encoders see a
# consistent vocabulary.
pdf3[strcolumns] = pdf3[strcolumns].fillna('')

# Normalize handset brand labels: lowercase first, then map the known
# aliases onto the spellings used at training time.
pdf3['brand_name'] = pdf3['brand_name'].map(str.lower)
pdf3['brand_name'] = pdf3['brand_name'].replace(
    {'apple': '苹果', '维沃': 'vivo', '欧珀': 'oppo'})


import joblib
from sklearn import metrics
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
import heapq
from sklearn import preprocessing
from pyspark.sql import SparkSession
from pyspark.sql import Row
from pyspark import SparkFiles
import numpy as np
import pandas as pd


# Apply the label encoders fitted at training time. Rows whose category was
# never seen during training are dropped (LabelEncoder cannot transform
# unseen labels); the survivors are mapped to their integer codes.
for col in strcolumns:
    # The original also instantiated a fresh, unused LabelEncoder here —
    # only the persisted, fitted encoder is needed.
    model_file = 'strcolumns_process_{}_1115_newmonth9_4to5_v2.model'.format(col)
    le_fit = joblib.load(filename=model_file)
    known_labels = le_fit.classes_.tolist()
    pdf3 = pdf3[pdf3[col].isin(known_labels)]
    pdf3 = pdf3.reset_index(drop=True)
    # Pass the 1-D Series — LabelEncoder.transform expects 1-D input;
    # the original passed a one-column DataFrame (pdf3[[col]]).
    pdf3[col] = le_fit.transform(pdf3[col])
    print(col)
print('done')
print(len(pdf3))

# Feature column order expected by the trained classifier: numeric features,
# then encoded categoricals, then the derived day counts.
# NOTE(review): 'tianyi_offer_id' is listed here although it is commented
# out of the label-encoded strcolumns list — confirm the model input.
featuercolumns = [
    'comp_serv_flag', 'present_1x_duration', 'net_flow', 'yy_init_val',
    'yy_accu_val', 'll_init_val', 'll_use_val', 'll_accu_val',
    'mon_flux_rate', 'amount_3avg', 'online_len', 'flux_1x',
    'active_day_num', 'rate_duration', 'offs_amount', 'terminal_flag_5g',
    'terminal_price', 'fuka_num', 'arpu_average_3', 'fair_amount_sum',
    'max_fk_num', 'free_fk_limit', 'coefficient', 'if_only_ty',
    'is_three_null_user', 'business_id', 'lte_card_type', 'channel_type',
    'net_flag', 'tianyi_offer_id', 'network_standard', 'brand_name',
    'bill_date_new', 'lte_sim_change_date_new',
]

X_test = pdf3[featuercolumns]
print('done')

# Score with the pre-trained classifier; predict_proba returns one column
# per class (class 0, class 1).
cvf = joblib.load(filename='lgb_1115newmonth10_v2.model')

pred_ = cvf.predict_proba(X_test)
print('done')

df_ = pd.DataFrame(pred_, columns=['resu_0', 'resu_1'])
# Binary decision at the 0.2 probability cutoff, used only for the
# distribution report below.
df_['if_'] = (df_['resu_1'] >= 0.2).astype(int)
print(df_['if_'].value_counts())

# Attach the ids to the positive-class score; both frames share the same
# 0..n-1 index, so the axis=1 concat lines rows up one-to-one.
df_merge = pd.concat([pdf3[['serv_id', 'cust_id']], df_['resu_1']], axis=1)

df_merge['serv_id'] = df_merge['serv_id'].astype(str)
df_merge['cust_id'] = df_merge['cust_id'].astype(str)

df_merge.to_csv(r'df_pz_11_09_01.csv', index=False)
print(df_merge.head(10))
print(df_merge.shape)

import pandas as pd
from pyspark import SparkContext,SparkConf
from pyspark.sql import HiveContext
from pyspark.sql.types import *


# Re-load the per-batch score files produced by earlier runs and stack them
# into one frame. A single concat in the order (3, 1, 2) reproduces the row
# order of the original nested pairwise concats.
# NOTE(review): these are the 11_08 batch files, while the scoring step
# above writes df_pz_11_09_01.csv — confirm the intended inputs.
df_merge_1 = pd.read_csv('df_pz_11_08_01.csv')
df_merge_2 = pd.read_csv('df_pz_11_08_02.csv')
df_merge_3 = pd.read_csv('df_pz_11_08_03.csv')

df_merge = pd.concat([df_merge_3, df_merge_1, df_merge_2])

print(df_merge.head(10))
print(df_merge.info())

# Normalize the id columns to strings before writing out.
df_merge['serv_id'] = df_merge['serv_id'].astype(str)
df_merge['cust_id'] = df_merge['cust_id'].astype(str)

# Write the merged scores back to Hive.
df_test = df_merge

# `sc` is never created anywhere in this file; reuse the active context
# (or create one) so the script also runs outside an interactive shell.
sc = SparkContext.getOrCreate()
sqlContext = HiveContext(sc)

# The schema must cover every column of df_test. The original declared only
# cust_id, which makes createDataFrame fail on the 3-column pandas frame
# (serv_id, cust_id, resu_1).
schema = StructType([
    StructField("serv_id", StringType(), True),
    StructField("cust_id", StringType(), True),
    StructField("resu_1", DoubleType(), True),
])
sdf = sqlContext.createDataFrame(df_test, schema)
sdf.registerTempTable("temp_test")

# Overwrite an existing table:
# sqlContext.sql("insert overwrite table it_eda_hive.tmp_dyg_drh_test2 select * from temp_test")
# Create a new table from the temp view (explicit AS for Hive CTAS syntax):
sqlContext.sql("create table idealsh.df_test_1118 as select * from temp_test")