import numpy as np
import pandas as pd
import seaborn as sns
import lightgbm as lgb
from lightgbm import LGBMClassifier
import sklearn
from sklearn.metrics import roc_auc_score, precision_recall_curve, roc_curve, average_precision_score
from sklearn.model_selection import KFold, StratifiedKFold
from sklearn.preprocessing import LabelEncoder
import gc
import re
from dateutil.relativedelta import relativedelta
import math


# tools
def sum1(dataframe, col, dfname='dataframe'):
    """Count occurrences of each distinct value in dataframe[col].

    Parameters: dataframe -- a pandas DataFrame; col -- column name;
    dfname -- table name (kept for interface compatibility, unused here).
    Returns a dict mapping value -> occurrence count.
    """
    # Fixed: the original kept a `total` counter that was incremented only
    # for repeated values (a bug) and was never used; it has been removed.
    result = {}
    for value in dataframe[col]:
        result[value] = result.get(value, 0) + 1
    return result

def sum2(data, col):
    """Print the distinct values of data[col], ordered by descending frequency."""
    counts = data[col].value_counts()
    print(counts.index)

def sum3(dataframe, col, dfname='dataframe'):
    """Print a frequency table (value, count, percentage) for dataframe[col].

    Parameters: dataframe -- a pandas DataFrame; col -- column name;
    dfname -- table name used in the printed header.
    Returns a dict mapping value -> occurrence count.
    """
    result = {}
    total = 0
    for i in dataframe[col]:
        # BUG FIX: `total` used to be incremented only for repeated values,
        # so the printed percentages were wrong; count every row instead.
        total += 1
        if i not in result:
            result[i] = 1
        else:
            result[i] = result[i] + 1
    print(dfname + " : " + col)
    print("  名称 , 总数 , 比例")
    for i in sorted(result):
        print('  ' + str(i) + ' , ' + str(result[i]) + ' , ' + '%.2f' % (result[i] * 100 / total) + '%')
    return result

def numerical(data, col, mapping):
    """Discretize column `col` of `data` in place via the lookup table `mapping`.

    Values absent from `mapping` become NaN (pandas `Series.map` semantics).
    """
    mapped = data[col].map(mapping)
    data[col] = mapped

def saving(num):
    """Snapshot the three global working tables to ../user_data/ with prefix `num`."""
    prefix = '../user_data/' + num
    # Relies on the module-level globals train_public / train_internet / test.
    train_public.to_csv(prefix + 'public.csv', index=0)
    train_internet.to_csv(prefix + 'internet.csv', index=0)
    test.to_csv(prefix + 'test.csv', index=0)

# Discretize a numeric column into buckets of width `interval`.
def discrete(data, col, interval):
    """Replace data[col] in place with values truncated to multiples of `interval`."""
    def bucket(x):
        # int() truncates toward zero, matching the original behavior.
        return int(x / interval) * interval
    data[col] = data[col].apply(bucket)

# Mean (target) encoding by default probability.
# col: column to encode; flg: 1 to also count train_internet rows;
# isnum: 1 when the column is numeric (keys are truncated to int while counting).
def encoding(col,flg,isnum):
    """Rank-encode `col` in place on the global train_public / train_internet /
    test tables, ordered by each value's observed default rate.

    Values supported by fewer than 0.5% of the counted rows fall back to the
    overall default rate before ranking; at apply time, unseen or NaN values
    also receive the overall default rate instead of a rank.
    """
    # default_cnt: value -> defaulted-row count; tot_cnt: value -> row count.
    default_cnt = {} ; tot_cnt = {} ; 
    # tot_para[0] = total rows counted, tot_para[1] = total defaulted rows.
    tot_para=[0,0]; 
    def count_default(row):
        tot_para[0] +=1
        if row['isDefault'] == 1 :
            tot_para[1] +=1
            if isnum==1:
                # Skip NaNs; numeric keys are truncated to int.
                if np.isnan(row[col])==False:
                    if(int(row[col]) not in default_cnt):
                        default_cnt[ int(row[col]) ] = 1
                    else:
                        default_cnt[ int(row[col]) ] +=1
            else:
                if(row[col] not in default_cnt):
                    default_cnt[ row[col] ] = 1
                else:
                    default_cnt[ row[col] ] +=1
        if isnum==1:
            if np.isnan(row[col])==False:
                if( int(row[col]) not in tot_cnt):
                    tot_cnt[ int(row[col]) ] = 1
                else:
                    tot_cnt[ int(row[col]) ] +=1
        else:
            if(row[col] not in tot_cnt):
                tot_cnt[ row[col] ] = 1
            else:
                tot_cnt[ row[col] ] +=1
    # apply() is used purely for its side effects on the counters above.
    train_public.apply(lambda row : count_default(row), axis=1)
    if(flg==1): train_internet.apply(lambda row : count_default(row), axis=1)
    for i in default_cnt:
        # Low-support values (<0.5% of counted rows) get the global default rate.
        if(tot_cnt[i]/tot_para[0] < 0.005): 
            default_cnt[i] = tot_para[1] / tot_para[0]
        else :
            default_cnt[i] /= tot_cnt[i]
    # Replace each rate by its 1-based rank (ascending; ties broken by key).
    sorted_result = sorted(default_cnt.items(), key= lambda i:(i[1],i[0]))
    for i in range(len(sorted_result)):
        default_cnt[sorted_result[i][0]] = i+1
    #print(default_cnt)
    def apply_encoding(row):
        # Unseen or NaN values fall back to the overall default rate.
        if( (row[col] not in default_cnt) or (isnum==1 and np.isnan(row[col]) ) ): return tot_para[1] / tot_para[0]
        return default_cnt[ row[col] ]
    train_public[col]=train_public.apply(lambda row : apply_encoding(row), axis=1)
    train_internet[col]=train_internet.apply(lambda row : apply_encoding(row), axis=1)
    test[col]=test.apply(lambda row: apply_encoding(row), axis=1)

def encoding1(col):
    """Rank-encode numeric column `col` in place on train_public,
    train_internet AND test, ordered by the mean 'good_man' score
    (0-3 early-repayment flags) of each value.

    Mirrors encoding(): low-support values (<0.5% of total weight) and
    unseen/NaN values fall back to the overall mean; the surviving means
    are replaced by their 1-based rank.
    """
    # default_cnt: value -> summed good_man score; tot_cnt: value -> weight.
    default_cnt = {} ; tot_cnt = {} ; 
    # tot_para[0] = total weight (3 per row), tot_para[1] = summed good_man.
    tot_para=[0,0]; 
    def count_default(row):
        # Each row contributes weight 3 (good_man ranges over 0..3).
        tot_para[0] += 3
        if row['good_man'] >= 1 :
            tot_para[1] += row['good_man']
            if np.isnan(row[col])==False:
                if(int(row[col]) not in default_cnt):
                    default_cnt[ int(row[col]) ] = row['good_man']
                else:
                    default_cnt[ int(row[col]) ] += row['good_man']
        if np.isnan(row[col])==False:
            if( int(row[col]) not in tot_cnt):
                tot_cnt[ int(row[col]) ] = 3
            else:
                tot_cnt[ int(row[col]) ] += 3
    # Unlike encoding(), the unlabeled test table also contributes counts
    # (good_man does not require the isDefault label).
    train_public.apply(lambda row : count_default(row), axis=1)
    train_internet.apply(lambda row : count_default(row), axis=1)
    test.apply(lambda row : count_default(row), axis=1)
    for i in default_cnt:
        # Low-support values get the overall mean score.
        if(tot_cnt[i]/tot_para[0] < 0.005): 
            default_cnt[i] = tot_para[1] / tot_para[0]
        else :
            default_cnt[i] /= tot_cnt[i]
    # Replace each mean by its 1-based rank (ascending; ties broken by key).
    sorted_result = sorted(default_cnt.items(), key= lambda i:(i[1],i[0]))
    for i in range(len(sorted_result)):
        default_cnt[sorted_result[i][0]] = i+1
    #print(default_cnt)
    def apply_encoding(row):
        # Unseen or NaN values fall back to the overall mean score.
        if( (row[col] not in default_cnt) or ( np.isnan(row[col]) ) ): return tot_para[1] / tot_para[0]
        return default_cnt[ row[col] ]
    train_public[col]=train_public.apply(lambda row : apply_encoding(row), axis=1)
    train_internet[col]=train_internet.apply(lambda row : apply_encoding(row), axis=1)
    test[col]=test.apply(lambda row: apply_encoding(row), axis=1)

# Model1 dataProcess

# Load the raw datasets with pandas.
train_public=pd.read_csv('../raw_data/train_public.csv')
train_internet=pd.read_csv('../raw_data/train_internet.csv')
test=pd.read_csv('../raw_data/test_public.csv')

# Fix the internet table's header: is_default -> isDefault.
train_internet['isDefault']=train_internet['is_default']
train_internet.drop('is_default',axis=1,inplace=True)

# Map work_year strings to numbers; the para_workYear_* values are tunable.
para_workYear_nan = np.nan ; para_workYear_less1 = 0 ; para_workYear_great10 = 15
workYearMap = {
    'nan': para_workYear_nan,
    '< 1 year': para_workYear_less1,
    '1 year':1,
    '2 years':2,
    '3 years':3,
    '4 years':4,
    '5 years':5, 
    '6 years':6,
    '7 years':7,
    '8 years':8,
    '9 years':9,
    '10+ years': para_workYear_great10
}
numerical(train_public,'work_year',workYearMap)
numerical(train_internet,'work_year',workYearMap)
numerical(test,'work_year',workYearMap)
# Keep the raw value and rank-encode a copy by default rate.
train_public['work_year_sorted']=train_public['work_year']
train_internet['work_year_sorted']=train_internet['work_year']
test['work_year_sorted']=test['work_year']
encoding('work_year_sorted',1,1)

# Encode employer type by default rate.
encoding('employer_type',1,0)

# Encode industry by default rate.
encoding('industry',1,0)

# Map job type (work_type) — this column exists only in the internet table.
workTypeMap={
    '公务员': 1,
    '工程师': 2,
    '职员': 3,
    '其他': 4,
    '工人': 5
}
numerical(train_internet,'work_type',workTypeMap)

# Map loan grade (class) to spaced numeric values.
classMap={ 'A': 3,'B': 8,'C': 13,'D': 18,'E': 23,'F': 28,'G': 33 }
numerical(train_public,'class',classMap)
numerical(train_internet,'class',classMap)
numerical(test,'class',classMap)

# Split dates into year/month parts: issue_date and earlies_credit_mon.
def toComplete(record):
    """Normalize an earlies_credit_mon string so pandas can parse it as a date.

    If the string has no "<digits>-" part (month-first style such as
    "Mar-85"), prefix a day of "1-"; otherwise append "-21" as the day.
    """
    # Raw string avoids the invalid-escape-sequence warning for \d (3.12+).
    fd = re.search(r'(\d+-)', record)
    if fd is None:
        return '1-' + record
    return record + '-21'

def dateclear(data):
    """Expand issue_date and earlies_credit_mon into numeric features, in place.

    Adds issue_date_{month,year,workday}, earlies_{mon,day,year}, and two
    rough month-resolution timestamps (ecm_t, issue_t), then drops the
    original date columns. Uses the module-level toComplete() helper.
    """
    data['issue_date']=pd.to_datetime(data['issue_date'])
    data['issue_date_month']=data['issue_date'].dt.month
    data['issue_date_year']=data['issue_date'].dt.year
    data['issue_date_workday']=data['issue_date'].dt.dayofweek
    data.drop('issue_date',axis=1,inplace=True)
    data['earlies_credit_mon1']=pd.to_datetime(data['earlies_credit_mon'].map(toComplete))
    data['earlies_mon']=data['earlies_credit_mon1'].dt.month
    data['earlies_day']=data['earlies_credit_mon1'].dt.day
    data['earlies_year']=data['earlies_credit_mon1'].dt.year
    def nanyear(row) :
        # Year is only kept for month-first strings (no "<digits>-" part);
        # two-digit years parsed into the future are shifted back a century.
        fd = re.search(r'(\d+-)', row['earlies_credit_mon'])  # raw string for \d
        if fd is not None: return np.nan
        if row['earlies_year'] > 2021 :
            return row['earlies_year'] -100
        return row['earlies_year']
    def nanday(row) :
        # Day is only meaningful when the string carried a "<digits>-" part.
        fd = re.search(r'(\d+-)', row['earlies_credit_mon'])
        if fd is None: return np.nan
        return row['earlies_day']
    data['earlies_year']= data.apply(lambda row : nanyear(row),axis=1)
    data['earlies_day']= data.apply(lambda row : nanday(row),axis=1)
    data.drop('earlies_credit_mon',axis=1,inplace=True)
    data.drop('earlies_credit_mon1',axis=1,inplace=True)
    def ecm_timestamp(row) :
        # Rough timestamp: ~days since 1900 at month resolution (365/30 steps).
        if np.isnan(row['earlies_year']) : return np.nan
        res = (row['earlies_year']-1900) * 365
        res += row['earlies_mon'] * 30
        return res+1
    data['ecm_t'] = data.apply(lambda row : ecm_timestamp(row),axis=1)
    def issue_timestamp(row) :
        res = (row['issue_date_year']-1900) * 365
        res += row['issue_date_month'] * 30
        return res +1
    data['issue_t'] = data.apply(lambda row : issue_timestamp(row),axis=1)

dateclear(train_public)
dateclear(train_internet)
dateclear(test)

# Rank-encode copies of the derived time features by default rate.
def encoding2(col):
    # Keep the raw feature; add a rank-encoded '<col>_sorted' twin.
    train_public[col+'_sorted'] = train_public[col]
    train_internet[col+'_sorted'] = train_internet[col]
    test[col+'_sorted'] = test[col]
    encoding(col+'_sorted',1,1)
encoding2('earlies_mon')
encoding2('issue_date_month')
encoding2('issue_date_workday')

# Approximate age bucket: (issue_date_year - earlies_year) // 5.
def setage(row):
    """Return a coarse age bucket, or NaN when earlies_year is missing."""
    year = row['earlies_year']
    if np.isnan(year):
        return np.nan
    return int((row['issue_date_year'] - year) // 5)
# Derive the coarse age bucket on all three tables.
train_public['age'] = train_public.apply(lambda row : setage(row), axis=1)
train_internet['age'] = train_internet.apply(lambda row : setage(row), axis=1)
test['age'] = test.apply(lambda row : setage(row), axis=1)

# Borrowers with any early-repayment activity are flagged as "good".
def setgoodman(row):
    """Return 0-3: how many of the three early-repayment fields are positive."""
    fields = ('early_return', 'early_return_amount', 'early_return_amount_3mon')
    return sum(1 for name in fields if row[name] > 0)
# Score each row 0-3 by early-repayment activity, then drop the raw columns.
train_public['good_man'] = train_public.apply(lambda row : setgoodman(row),axis=1)
train_internet['good_man'] = train_internet.apply(lambda row : setgoodman(row),axis=1)
test['good_man'] = test.apply(lambda row : setgoodman(row),axis=1)
train_public.drop(['early_return','early_return_amount','early_return_amount_3mon'],axis=1,inplace=True)
train_internet.drop(['early_return','early_return_amount','early_return_amount_3mon'],axis=1,inplace=True)
test.drop(['early_return','early_return_amount','early_return_amount_3mon'],axis=1,inplace=True)

# Discretize total_loan into buckets of 50.
discrete(train_public,'total_loan',50)
discrete(train_internet,'total_loan',50)
discrete(test,'total_loan',50)

# Discretize monthly_payment into buckets of 10.
discrete(train_public,'monthly_payment',10)
discrete(train_internet,'monthly_payment',10)
discrete(test,'monthly_payment',10)

# Estimate monthly income; rows with income > 50k and no early repayment
# are treated as dirty data.
def setincome(row):
    """Back out monthly income from payment / debt ratio, bucketed to 10s.

    Returns NaN for missing inputs, a zero ratio, or suspected dirty rows
    (income above 50000 with good_man == 0).
    """
    payment = row['monthly_payment']
    ratio = row['debt_loan_ratio']
    if np.isnan(payment) or np.isnan(ratio):
        return np.nan
    if ratio == 0:
        return np.nan
    income = payment / (ratio * 0.01)
    if row['good_man'] == 0 and income > 50000:
        return np.nan
    return int(income / 10) * 10
# Estimated monthly income (NaN for missing or suspected-dirty rows).
train_public['monthly_income']=train_public.apply(lambda row: setincome(row),axis=1)
train_internet['monthly_income']=train_internet.apply(lambda row: setincome(row),axis=1)
test['monthly_income']=test.apply(lambda row: setincome(row),axis=1)

# Split city vs. countryside: a post_code seen in more than 50 rows = city.
postcode_cnt = {}
postcode_cnt1 = train_public.groupby('post_code').agg({'loan_id':'count'})
for i,row in postcode_cnt1.iterrows() :
    if row.name not in postcode_cnt :
        postcode_cnt[row.name]= 0
    postcode_cnt[row.name] += row['loan_id']
postcode_cnt2 = train_internet.groupby('post_code').agg({'loan_id':'count'})
for i,row in postcode_cnt2.iterrows() :
    # NOTE(review): keys are cast to int here but not for the other two
    # tables — presumably post_code is float-typed in the internet table;
    # confirm the key spaces actually line up across tables.
    if int(row.name) not in postcode_cnt :
        postcode_cnt[int(row.name)]= 0
    postcode_cnt[int(row.name)] += row['loan_id']
postcode_cnt3 = test.groupby('post_code').agg({'loan_id':'count'})
for i,row in postcode_cnt3.iterrows() :
    if row.name not in postcode_cnt :
        postcode_cnt[row.name]= 0
    postcode_cnt[row.name] += row['loan_id']
def setCitySize(row):
    # 1 = city (>50 occurrences overall), 0 = countryside, NaN if missing/unseen.
    if np.isnan( row['post_code'] ):
        return np.nan
    if row['post_code'] not in postcode_cnt: return np.nan
    if postcode_cnt[row['post_code']]>50 : return 1
    return 0
train_public['is_city']=train_public.apply(lambda row: setCitySize(row),axis=1)
train_internet['is_city']=train_internet.apply(lambda row: setCitySize(row),axis=1)
test['is_city']=test.apply(lambda row: setCitySize(row),axis=1)

# Disposable income: monthly income minus an age-dependent baseline cost of
# b * |5 - age| / 5, with b = 1500 for city rows and 500 otherwise.
def realincome(row):
    """Return monthly_income minus the estimated living cost for the row."""
    base_cost = 500 if row['is_city'] == 0 else 1500
    scale = math.fabs(5 - row['age']) / 5.0
    return row['monthly_income'] - base_cost * scale
# Disposable income per row.
train_public['real_income']=train_public.apply(lambda row: realincome(row),axis=1)
train_internet['real_income']=train_internet.apply(lambda row: realincome(row),axis=1)
test['real_income']=test.apply(lambda row: realincome(row),axis=1)

# Payment-to-disposable-income ratio.
def realdlr(row):
    """Return monthly_payment / real_income, capped at 100 when income <= 1."""
    income = row['real_income']
    if income <= 1:
        return 100
    return row['monthly_payment'] / income
# Payment / disposable-income ratio per row.
train_public['real_debt_loan_ratio']=train_public.apply(lambda row: realdlr(row),axis=1)
train_internet['real_debt_loan_ratio']=train_internet.apply(lambda row: realdlr(row),axis=1)
test['real_debt_loan_ratio']=test.apply(lambda row: realdlr(row),axis=1)

# Interest rate times loan grade, floored to multiples of 0.5.
def resonless1(row):
    """Return (interest% * class) rounded down to a 0.5 step; NaN propagates."""
    interest = row['interest']
    grade = row['class']
    if np.isnan(interest) or np.isnan(grade):
        return np.nan
    return ((interest * 0.01 * grade) // 0.5) * 0.5
train_public['rl1']=train_public.apply(lambda row: resonless1(row),axis=1)
train_internet['rl1']=train_internet.apply(lambda row: resonless1(row),axis=1)
test['rl1']=test.apply(lambda row: resonless1(row),axis=1)

# Rank-encode post_code and region by the mean good_man score.
encoding1('post_code')
encoding1('region')

# Midpoint of the two credit-score bounds.
def scoreMid(row):
    """Return the mean of scoring_low and scoring_high."""
    low, high = row['scoring_low'], row['scoring_high']
    return (low + high) / 2.0
# Mid credit score per row, then persist the processed tables with prefix '01'.
train_public['scoring_mid']=train_public.apply(lambda row: scoreMid(row),axis=1)
train_internet['scoring_mid']=train_internet.apply(lambda row: scoreMid(row),axis=1)
test['scoring_mid']=test.apply(lambda row: scoreMid(row),axis=1)

saving('01')

# Model-1 training helper.
def Training3(TrainData,LabeledY,TestData,KFolder,lr):
    """K-fold LightGBM training; returns predictions for TestData as a
    DataFrame with columns ['id', 'isDefault'] (fold-averaged probability).

    TrainData/TestData: feature frames containing 'loan_id' (excluded from
    the features); LabeledY: target aligned with TrainData; KFolder: a
    KFold splitter; lr: learning rate.
    """
    feature=[i for i in TrainData.columns if i not in ['loan_id'] ]
    valPred=np.zeros(TrainData.shape[0]);testPred=np.zeros(TestData.shape[0])
    for n_fold,(i_train,i_val) in enumerate(KFolder.split(TrainData)):
        x_train=TrainData[feature].iloc[i_train];y_train=LabeledY.iloc[i_train]
        x_val=TrainData[feature].iloc[i_val];y_val=LabeledY.iloc[i_val]
        classifier=LGBMClassifier(n_estimators=4000,learning_rate=lr,num_leaves=1024,colsample_bytree=.65,subsample=.9,max_depth=5,max_bin=200,reg_alpha=.3,reg_lambda=.3,
            min_split_gain=.01,min_child_weight=2,silent=-1,verbose=-1,)
        classifier.fit(x_train,y_train,eval_set=[(x_train,y_train),(x_val,y_val)],early_stopping_rounds=45,verbose=0)
        # Out-of-fold predictions (collected but unused) and averaged test preds.
        valPred[i_val]=classifier.predict_proba(x_val,num_iteration=classifier.best_iteration_,verbose=0)[:,1]
        testPred+=classifier.predict_proba(TestData[feature], num_iteration=classifier.best_iteration_,verbose=0)[:,1]/KFolder.n_splits
        del x_train,y_train,x_val,y_val,classifier;gc.collect()

    # Keep only loan_id (copied to 'id') plus the averaged prediction.
    TestPred=TestData.copy()
    TestPred['id']=TestPred['loan_id']
    for i in TrainData.columns:
        if i not in ['id']: TestPred.drop(i,axis=1,inplace=True)
    TestPred['isDefault']=testPred
    return TestPred

# Reload the processed tables and drop user_id (not used as a feature).
ti=pd.read_csv('../user_data/01internet.csv')
pu=pd.read_csv('../user_data/01public.csv')
te=pd.read_csv('../user_data/01test.csv')

ti.drop(['user_id'],axis=1,inplace=True)
pu.drop(['user_id'],axis=1,inplace=True)
te.drop(['user_id'],axis=1,inplace=True)

# Learning rate for model 1.
learn_rate = 0.035

# Full model-1 pipeline (self-training with the internet table).
def realTrain2(train_public,train_internet,test):
    """Train on train_public, pull confidently-predicted train_internet rows
    into the training set (pseudo-labeling), then predict `test`.

    Returns an ['id','isDefault'] DataFrame from Training3.
    """

    # Align the internet table with the public schema first.
    sameCols=list(set(train_public.columns).intersection(set(train_internet.columns))) # shared columns
    notInternetCols=list(set(train_public.columns).difference(set(sameCols))) # public-only columns, absent from the internet table

    internet_current=train_internet[sameCols].copy() # internet slice used for pseudo-labeling
    for col in notInternetCols: internet_current[col]=np.nan

    public_current = train_public.copy()

    # Pseudo-labeling round(s) on the internet pool.
    maxRound = 1
    k=KFold(n_splits=5,shuffle=True,random_state=2318710007)
    for i in range(maxRound):
        if len(internet_current) == 0 : break
        x_train=public_current.drop(['isDefault'],axis=1,inplace=False)
        x_test=internet_current.drop(['isDefault'],axis=1,inplace=False)
        y=public_current['isDefault']

        internetPred=Training3(x_train,y,x_test,k,learn_rate) # predictions for this round's internet_current

        # Keep internet rows whose confident prediction agrees with their label.
        pred0=internetPred.loc[internetPred.isDefault<0.008,'id'].tolist()
        internet0 = internet_current[ internet_current.loan_id.isin(pred0) ].copy()
        internet0 = internet0[ internet0.isDefault == 0 ]
        pred1=internetPred.loc[internetPred.isDefault>1-0.05,'id'].tolist()
        internet1 = internet_current[ internet_current.loan_id.isin(pred1) ].copy()
        internet1 = internet1[ internet1.isDefault == 1 ]
        if len(pred0)==0 and len(pred1)==0 : break

        # Move the confident rows from the internet pool into the training set.
        internet_current = internet_current[ ~internet_current.loan_id.isin(pred0) ]
        internet_current = internet_current[ ~internet_current.loan_id.isin(pred1) ]
        public_current=pd.concat([public_current,internet0,internet1]).reset_index(drop=True)

        print('internet_rest_len=',len(internet_current))

    # Optional pseudo-labeling on the test set (disabled: maxRound = 0).
    maxRound =0
    for i in range(maxRound):
        if len(test) == 0 : break
        x_train=public_current.drop(['isDefault'],axis=1,inplace=False)
        x_test=test
        y=public_current['isDefault']

        testPred=Training3(x_train,y,x_test,k,learn_rate) # predict the test set

        pred0=testPred.loc[testPred.isDefault<0.1,'id'].tolist()
        test0 = test[ test.loan_id.isin(pred0) ].copy()
        test0['isDefault']=0
        pred1=testPred.loc[testPred.isDefault>0.9999999,'id'].tolist()
        test1 = test[ test.loan_id.isin(pred1) ].copy()
        test1['isDefault']=1
        if len(pred0)==0 and len(pred1)==0 : break

        public_current=pd.concat([public_current,test0,test1]).reset_index(drop=True)

    # Final fit on the augmented training set; predict the test table.
    x_train=public_current.drop(['isDefault'],axis=1,inplace=False)
    x_test=test
    y=public_current['isDefault']
    testPred=Training3(x_train,y,x_test,k,learn_rate) # final test predictions

    return testPred

# Model-1 submission.
subm = realTrain2(pu,ti,te)
subm.to_csv('../user_data/01submission.csv',index=False)





# Model2 dataProcess
# Reload the raw tables from scratch for the second model.
train_public=pd.read_csv('../raw_data/train_public.csv')
test=pd.read_csv('../raw_data/test_public.csv')
train_internet=pd.read_csv('../raw_data/train_internet.csv')
# Fix the internet table's header: is_default -> isDefault.
train_internet['isDefault']=train_internet['is_default']
train_internet.drop('is_default',axis=1,inplace=True)

# Loan grade A-G -> 1-7.
classMap = {grade: rank for rank, grade in enumerate('ABCDEFG', start=1)}

def setClassMap(row):
    """Map row['class'] through classMap; unknown grades become NaN."""
    return classMap.get(row['class'], np.nan)
# Numeric loan grade on all three tables.
train_public['class']=train_public.apply(lambda row: setClassMap(row),axis=1)
train_internet['class']=train_internet.apply(lambda row: setClassMap(row),axis=1)
test['class']=test.apply(lambda row: setClassMap(row),axis=1)

# Employer type -> integer code (order matches the original mapping 0-5).
employerMap = {name: code for code, name in enumerate((
    '上市企业',
    '世界五百强',
    '幼教与中小学校',
    '政府机构',
    '普通企业',
    '高等教育机构',
))}

def setEmployerMap(row):
    """Map row['employer_type'] through employerMap; unknown types become NaN."""
    return employerMap.get(row['employer_type'], np.nan)
# Numeric employer type on all three tables.
train_public['employer_type']=train_public.apply(lambda row: setEmployerMap(row),axis=1)
train_internet['employer_type']=train_internet.apply(lambda row: setEmployerMap(row),axis=1)
test['employer_type']=test.apply(lambda row: setEmployerMap(row),axis=1)

# Industry name -> integer code (order matches the original mapping 0-13).
industryMap = {name: code for code, name in enumerate((
    '交通运输、仓储和邮政业',
    '住宿和餐饮业',
    '信息传输、软件和信息技术服务业',
    '公共服务、社会组织',
    '农、林、牧、渔业',
    '制造业',
    '国际组织',
    '建筑业',
    '房地产业',
    '批发和零售业',
    '文化和体育业',
    '电力、热力生产供应业',
    '采矿业',
    '金融业',
))}

def setIndustryMap(row):
    """Map row['industry'] through industryMap; unknown industries become NaN."""
    return industryMap.get(row['industry'], np.nan)
# Numeric industry code on all three tables.
train_public['industry']=train_public.apply(lambda row: setIndustryMap(row),axis=1)
train_internet['industry']=train_internet.apply(lambda row: setIndustryMap(row),axis=1)
test['industry']=test.apply(lambda row: setIndustryMap(row),axis=1)

# work_year strings -> numeric years; missing/unknown -> -1.
workYearMap = {'nan': -1, '< 1 year': 0, '1 year': 1, '10+ years': 10}
workYearMap.update({'%d years' % y: y for y in range(2, 10)})

def setWorkyear(row):
    """Return the numeric work-year code for the row; -1 when unmapped."""
    return workYearMap.get(row['work_year'], -1)
# Numeric work-year on all three tables.
train_public['work_year']=train_public.apply(lambda row: setWorkyear(row),axis=1)
train_internet['work_year']=train_internet.apply(lambda row: setWorkyear(row),axis=1)
test['work_year']=test.apply(lambda row: setWorkyear(row),axis=1)

def toComplete(val):
    """Normalize an earlies_credit_mon string for pd.to_datetime (model 2).

    Strings without a "<digits>-" part get a "1-" day prefix; the rest get
    a "-01" day suffix.
    """
    # Raw string avoids the invalid-escape-sequence warning for \d (3.12+).
    fd = re.search(r'(\d+-)', val)
    if fd is None:
        return '1-' + val
    return val + '-01'

# Reference "now": credit histories parsed after this date are two-digit-year
# artifacts and get shifted back a century.
maxTime=pd.to_datetime('22-Sept-21')
def dateclear(dataframe):
    # Model-2 date cleanup: expand issue_date / earlies_credit_mon into
    # numeric month / year / day-of-week features, in place.
    dataframe['earlies_credit_mon']=pd.to_datetime(dataframe['earlies_credit_mon'].map(toComplete))
    # Two-digit years parsed into the future belong to the previous century.
    dataframe.loc[dataframe['earlies_credit_mon']>maxTime,'earlies_credit_mon' ]-=pd.offsets.DateOffset(years=100)
    dataframe['issue_date']=pd.to_datetime(dataframe['issue_date'])
    dataframe['issue_date_month']=dataframe['issue_date'].dt.month
    dataframe['issue_date_dayofweek']=dataframe['issue_date'].dt.dayofweek
    dataframe['earliesCreditMon']=dataframe['earlies_credit_mon'].dt.month
    dataframe['earliesCreditYear']=dataframe['earlies_credit_mon'].dt.year
    dataframe.drop(['issue_date','earlies_credit_mon'],axis=1,inplace=True)
dateclear(train_public)
dateclear(train_internet)
dateclear(test)

# Keep pristine copies for the self-training loop below.
train_public0=train_public.copy()
train_internet0=train_internet.copy()
test0=test.copy()

def training05(x_train,x_test,y_train,kfold):
    """K-fold LightGBM for model 2; returns x_test[['loan_id','isDefault']]
    with the fold-averaged predicted default probability.

    Prints the out-of-fold AUC on x_train. NOTE: mutates x_test by adding
    an 'isDefault' column.
    """
    valPred=np.zeros(x_train.shape[0])
    testPred=np.zeros(x_test.shape[0])
    features=[i for i in x_train.columns if i not in ['loan_id']] 
    for n_fold,(idx_tra,idx_val) in enumerate(kfold.split(x_train)):
        x_tra,y_tra=x_train[features].iloc[idx_tra],y_train.iloc[idx_tra]
        x_val,y_val=x_train[features].iloc[idx_val],y_train.iloc[idx_val]
        classifier=LGBMClassifier(n_estimators=4000,learning_rate=0.08,num_leaves=32,colsample_bytree=.65,subsample=.9,max_depth=5,reg_alpha=.3,reg_lambda=.3,
            min_split_gain=.01,min_child_weight=2,silent=-1,verbose=-1)
        classifier.fit(x_tra,y_tra,eval_set=[(x_tra,y_tra),(x_val,y_val)],eval_metric='auc',verbose=0,early_stopping_rounds=40)
        # Out-of-fold predictions for the AUC print; averaged test predictions.
        valPred[idx_val]=classifier.predict_proba(x_val,num_iteration=classifier.best_iteration_)[:,1]
        testPred+=classifier.predict_proba(x_test[features],num_iteration=classifier.best_iteration_)[:,1]/kfold.n_splits
        del classifier,x_tra,y_tra,x_val,y_val;gc.collect()
    print('Full AUC score %.6f' % roc_auc_score(y_train,valPred))
    x_test['isDefault']=testPred
    return x_test[['loan_id','isDefault']].copy()

# Align the internet table with the public schema (missing columns -> NaN).
cols_from1=set(train_public.columns)
cols_same=list(cols_from1.intersection(set(train_internet.columns)))
cols_diff=list(cols_from1.difference(set(cols_same)))

train_internet_same=train_internet[cols_same].copy()
for col in cols_diff:
    train_internet_same[col]=np.nan

# First pass: score the internet rows with a model trained on public data.
x_train=train_public.drop(['user_id','isDefault'],axis=1,inplace=False)
x_test=train_internet_same.drop(['user_id','isDefault'],axis=1,inplace=False)
y_train=train_public['isDefault']
folds=KFold(n_splits=5,shuffle=True,random_state=546789)
intePred=training05(x_train,x_test,y_train,folds)

# Keep confidently non-default internet rows (pred < 0.05) as extra training data.
train_public['From']=1;test['From']=1;train_internet_same['From']=0
InteId=intePred.loc[intePred['isDefault']<0.05,'loan_id'].tolist()
train_internet_same['isDefault']=train_internet['isDefault']
inte_train=train_internet_same[train_internet_same.loan_id.isin(InteId)].copy()
data=pd.concat([ train_public,test,inte_train]).reset_index(drop=True)
trainData=data[data['isDefault'].notna()];testData=data[data['isDefault'].isna()]

# Second pass: train on public + selected internet rows, predict the test set.
x_train=trainData.drop(['user_id','isDefault'],axis=1,inplace=False)
x_test=testData.drop(['user_id','isDefault'],axis=1,inplace=False)
y_train=trainData['isDefault']
testPred=training05(x_train,x_test,y_train,folds)
testPred=testPred.rename(columns={'loan_id':'id'})
testPred.to_csv('../user_data/05submission.csv',index=False)
print("saved subm 5")

# Two self-training rounds: fold confidently non-default test predictions
# from the previous submission back into the training set, then refit.
for i in range(2):
    # Use the previous round's predictions to augment the training set.
    lastSub=pd.read_csv("../user_data/05submission.csv");lastSub=lastSub.rename(columns={'id':'loan_id'})
    lastSub.loc[lastSub['isDefault']<0.5,'isDefault']=0;sub=lastSub[(lastSub['isDefault']==0)]
    train_public=pd.read_csv('../raw_data/train_public.csv')
    test_train=pd.read_csv('../raw_data/test_public.csv')
    test_train=test_train.merge(sub,on='loan_id',how='inner')
    train_public=pd.concat([train_public,test_train]).reset_index(drop=True)
    test=test0.copy()
    train_internet=train_internet0.copy()

    # Re-run the model-2 preprocessing on the augmented public table.
    train_public['class']=train_public.apply(lambda row: setClassMap(row),axis=1)
    train_public['employer_type']=train_public.apply(lambda row: setEmployerMap(row),axis=1)
    train_public['industry']=train_public.apply(lambda row: setIndustryMap(row),axis=1)
    train_public['work_year']=train_public.apply(lambda row: setWorkyear(row),axis=1)
    dateclear(train_public)

    # Re-align the internet table with the (possibly changed) public schema.
    cols_from1=set(train_public.columns)
    cols_same=list(cols_from1.intersection(set(train_internet.columns)))
    cols_diff=list(cols_from1.difference(set(cols_same)))

    train_internet_same=train_internet[cols_same].copy()
    for col in cols_diff:
        train_internet_same[col]=np.nan

    # Score the internet rows against the augmented public training set.
    x_train=train_public.drop(['user_id','isDefault'],axis=1,inplace=False)
    x_test=train_internet_same.drop(['user_id','isDefault'],axis=1,inplace=False)
    y_train=train_public['isDefault']
    intePred=training05(x_train,x_test,y_train,folds)

    # Looser threshold (<0.5) than the first pass when selecting internet rows.
    train_public['From']=1;test['From']=1;train_internet_same['From']=0
    InteId=intePred.loc[intePred['isDefault']<0.5,'loan_id'].tolist()
    train_internet_same['isDefault']=train_internet['isDefault']
    inte_train=train_internet_same[train_internet_same.loan_id.isin(InteId)].copy()
    data=pd.concat([train_public,test,inte_train]).reset_index(drop=True)
    trainData=data[data['isDefault'].notna()];testData=data[data['isDefault'].isna()]

    # Refit and overwrite the model-2 submission for the next round.
    x_train=trainData.drop(['user_id','isDefault'],axis=1,inplace=False)
    x_test=testData.drop(['user_id','isDefault'],axis=1,inplace=False)
    y_train=trainData['isDefault']
    testPred=training05(x_train,x_test,y_train,folds)
    testPred=testPred.rename(columns={'loan_id': 'id'})
    testPred.to_csv('../user_data/05submission.csv',index=False)
    print("saved subm 5")

# Final submission: weighted blend of the two models (k for model 1).
sub0=pd.read_csv("../user_data/01submission.csv")
sub1=pd.read_csv("../user_data/05submission.csv")
k=0.575
testPred=sub0
testPred['isDefault']=sub0['isDefault']*k+sub1['isDefault']*(1-k)
testPred.to_csv('../prediction_result/submission.csv',index=False)
