# -*- coding: utf-8 -*-
"""
Created on Wed Nov  4 22:08:09 2020

@author: Admin
"""
import numpy as np
import pandas as pd
import jieba
import jieba.analyse

import warnings
from tqdm import tqdm
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, precision_recall_fscore_support
import xgboost as xgb
import lightgbm as lgb
import catboost as cab
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.ensemble import RandomForestClassifier
warnings.filterwarnings("ignore")

from datetime import datetime
import pypinyin
def pinyin(word):
    """Return the pinyin romanization of *word*, all syllables concatenated.

    Uses pypinyin.NORMAL style (no tone marks). pypinyin.pinyin returns one
    list per character; each inner list is joined and the pieces are
    concatenated, matching the original behaviour.
    """
    # ''.join over a generator avoids the quadratic cost of repeated
    # string concatenation (the original built the result with +=).
    return ''.join(
        ''.join(syllable)
        for syllable in pypinyin.pinyin(word, style=pypinyin.NORMAL)
    )

def countX(lst, x):
    """Return how many times *x* occurs in *lst*.

    Thin wrapper over the built-in ``count`` method, so it works for lists
    (element count) and strings (non-overlapping substring count) alike.
    """
    occurrences = lst.count(x)
    return occurrences

#Begin
# Directory layout: raw competition data, final predictions, intermediate files.
folderPath='../raw_data/'
resultPath='../prediction_result/'
userfolderPath='../user_data/'


print('Model1, Model2 and Model3 will be ensembled in the following.................')
print('Model1 data preparation begins.................')
print('Data cleaning.................')
#------------------------------------------------------------------------------
# Data preprocessing: merge company base info with the labels, and derive how
# long each company has been operating relative to a fixed reference date.
df_base_info=pd.read_csv(folderPath+'base_info.csv')
df_base_info=df_base_info.drop_duplicates(['id'],keep='first')

df_target=pd.read_csv(folderPath+'entprise_info.csv')
df_target=df_target.drop_duplicates(['id'],keep='first')

# Left join keeps unlabelled companies (label becomes NaN).
df=pd.merge(df_base_info,df_target,how='left',on='id')
    
df['industryco']=df['industryco'].apply(lambda x:str(x))

# Reference date used to measure operating periods.
st_dt='2020-10-01'
st_dt=datetime.strptime(st_dt,'%Y-%m-%d')
    
# ``x==x`` is False only for NaN, so missing dates are passed through.
df['opfrom']=df['opfrom'].apply(lambda x:datetime.strptime(x[:10],'%Y-%m-%d') if x==x else x)
df['opto']=df['opto'].apply(lambda x:datetime.strptime(x[:10],'%Y-%m-%d') if x==x else x)
# Days in operation before the reference date / days remaining after it.
df['open_len']=df['opfrom'].apply(lambda x:(st_dt.date()-x.date()).days if x==x else np.nan)
df['end_len']=df['opto'].apply(lambda x:(x.date()-st_dt.date()).days if x==x else np.nan)

# Sentinel for all remaining missing values, then persist the intermediate.
df=df.fillna(-99999999)
df.to_csv(userfolderPath+'base_info_new2.csv',index=False)

# Annual-report table: build per-company aggregates (count / min / max over
# all report years), plus the first and last report and first-to-last deltas.
df_annual=pd.read_csv(folderPath+'annual_report_info.csv')

# Encode the business-status column; NaN gets the global sentinel.
dict_busname={np.nan:-99999999,'开业':1,'歇业':2,'停业':3,'清算':4}
df_annual['BUSSTNAME']=df_annual['BUSSTNAME'].map(dict_busname)
df_annual['ANCHEYEAR']=df_annual['ANCHEYEAR'].apply(lambda x:int(x))

# Per-company non-null count for every column, suffixed '_cnt'.
df_cnt=df_annual.groupby(['id']).count()
col_name=list(df_cnt.columns)
col_name_new=[i+'_cnt' for i in col_name]
rename_dict=dict(zip(col_name,col_name_new))
df_cnt.rename(columns=rename_dict,inplace=True)

# Per-company minimum for every column, suffixed '_min'.
df_min=df_annual.groupby(['id']).min()
col_name=list(df_min.columns)
col_name_new=[i+'_min' for i in col_name]
rename_dict=dict(zip(col_name,col_name_new))
df_min.rename(columns=rename_dict,inplace=True)

# Per-company maximum for every column, suffixed '_max'.
df_max=df_annual.groupby(['id']).max()
col_name=list(df_max.columns)
col_name_new=[i+'_max' for i in col_name]
rename_dict=dict(zip(col_name,col_name_new))
df_max.rename(columns=rename_dict,inplace=True)

# One row per (id, year); the earliest and latest report for each company.
df_annual=df_annual.drop_duplicates(['id','ANCHEYEAR'],keep='first')
df_annual=df_annual.sort_values(by=['id','ANCHEYEAR'],ascending=True)  
df_annual_first=df_annual.drop_duplicates(['id'],keep='first')

df_annual=df_annual.sort_values(by=['id','ANCHEYEAR'],ascending=False)  
df_annual_last=df_annual.drop_duplicates(['id'],keep='first')

col_name=list(df_annual_first.columns)

col_name.remove('id')
col_name_new=[i+'_first' for i in col_name]
rename_dict=dict(zip(col_name,col_name_new))
df_annual_first.rename(columns=rename_dict,inplace=True)

# Combine last-report values with first-report values and the aggregates.
df=pd.merge(df_annual_last,df_annual_first,how='left',on='id')
df=pd.merge(df,df_cnt,how='left',on='id')
df=pd.merge(df,df_min,how='left',on='id')
df=pd.merge(df,df_max,how='left',on='id')

# Change between the latest and the earliest report for every column.
for i in col_name:
    col=i+'_delta'
    old_col=i+'_first'
    df[col]=df[i]-df[old_col]
    
# Distinct report years present in the data.
year_list=list(set(list(df_annual['ANCHEYEAR'])))

# Pivot the annual reports: one set of columns per report year ('_<year>').
for i in year_list:
    df_tmp=df_annual[df_annual['ANCHEYEAR']==i]
    
    col_name=list(df_tmp.columns)
    col_name.remove('id')
    col_name_new=[k+'_'+str(i) for k in col_name]
    rename_dict=dict(zip(col_name,col_name_new))
    # NOTE(review): rename on a filtered slice triggers SettingWithCopy
    # warnings (suppressed above); behaviour relies on rename returning
    # the updated slice in place.
    df_tmp.rename(columns=rename_dict,inplace=True)
    df=pd.merge(df,df_tmp,how='left',on='id')

# Year-over-year change flags. A value of 2 switching to something else is
# treated as a status change. NOTE(review): assumes columns for years
# 2015-2018 exist in the pivot above -- confirm against the raw data.
df['WEBSITSIGN_ch_2016']=df.apply(lambda x: 1 if x['WEBSITSIGN_2015']==2 and x['WEBSITSIGN_2016']!=2 else 0,axis=1)
df['WEBSITSIGN_ch_2017']=df.apply(lambda x: 1 if x['WEBSITSIGN_2016']==2 and x['WEBSITSIGN_2017']!=2 else 0,axis=1)
df['WEBSITSIGN_ch_2018']=df.apply(lambda x: 1 if x['WEBSITSIGN_2017']==2 and x['WEBSITSIGN_2018']!=2 else 0,axis=1)

# Total number of website-sign changes across the three transitions.
df['WEBSITSIGN_ch']=df['WEBSITSIGN_ch_2016']+df['WEBSITSIGN_ch_2017']+df['WEBSITSIGN_ch_2018']

df['FORINVESTSIGN_ch_2016']=df.apply(lambda x: 1 if x['FORINVESTSIGN_2015']==2 and x['FORINVESTSIGN_2016']!=2 else 0,axis=1)
df['FORINVESTSIGN_ch_2017']=df.apply(lambda x: 1 if x['FORINVESTSIGN_2016']==2 and x['FORINVESTSIGN_2017']!=2 else 0,axis=1)
df['FORINVESTSIGN_ch_2018']=df.apply(lambda x: 1 if x['FORINVESTSIGN_2017']==2 and x['FORINVESTSIGN_2018']!=2 else 0,axis=1)

df['FORINVESTSIGN_ch']=df['FORINVESTSIGN_ch_2016']+df['FORINVESTSIGN_ch_2017']+df['FORINVESTSIGN_ch_2018']

# Stock-transfer sign: flags simply mark that the value was 2 that year.
df['STOCKTRANSIGN_ch_2016']=df.apply(lambda x: 1 if x['STOCKTRANSIGN_2015']==2  else 0,axis=1)
df['STOCKTRANSIGN_ch_2017']=df.apply(lambda x: 1 if x['STOCKTRANSIGN_2016']==2  else 0,axis=1)
df['STOCKTRANSIGN_ch_2018']=df.apply(lambda x: 1 if x['STOCKTRANSIGN_2017']==2  else 0,axis=1)

df['STOCKTRANSIGN_ch']=df['STOCKTRANSIGN_ch_2016']+df['STOCKTRANSIGN_ch_2017']+df['STOCKTRANSIGN_ch_2018']

# Headcount jump: >=20% relative growth AND more than 10 new employees.
# The +0.0001 guards against division by zero.
df['EMPNUM_ch_2016']=df.apply(lambda x: 1 if x['EMPNUM_2016']/(x['EMPNUM_2015']+0.0001)>=1.2 and x['EMPNUM_2016']-x['EMPNUM_2015']>10 else 0,axis=1)
df['EMPNUM_ch_2017']=df.apply(lambda x: 1 if x['EMPNUM_2017']/(x['EMPNUM_2016']+0.0001)>=1.2 and x['EMPNUM_2017']-x['EMPNUM_2016']>10 else 0,axis=1)
df['EMPNUM_ch_2018']=df.apply(lambda x: 1 if x['EMPNUM_2018']/(x['EMPNUM_2017']+0.0001)>=1.2 and x['EMPNUM_2018']-x['EMPNUM_2017']>10 else 0,axis=1)

df['EMPNUM_ch']=df['EMPNUM_ch_2016']+df['EMPNUM_ch_2017']+df['EMPNUM_ch_2018']

# Persist the annual-report feature table.
df=df.fillna(-99999999)
df.to_csv(userfolderPath+'annual_report_info_1.csv',index=False)

# Reload the intermediates and join base info, annual features and other info.
df_base_info=pd.read_csv(userfolderPath+'base_info_new2.csv')
df_base_info=df_base_info.drop_duplicates(['id'],keep='first')

df_annual=pd.read_csv(userfolderPath+'annual_report_info_1.csv')
df_annual=df_annual.drop_duplicates(['id'],keep='first')

df_other=pd.read_csv(folderPath+'other_info.csv')
df_other=df_other.groupby(['id']).max()

df=pd.merge(df_base_info,df_annual,how='left',on='id')
df=pd.merge(df,df_other,how='left',on='id')

df=df.fillna(-99999999)

# Fingerprint of a company's "essential" attributes. Rows that share the
# same fingerprint (but have different ids) are treated as near-duplicate
# companies in the linking loop below.
df['feature_ess']=df.apply(lambda x: str(x.oplocdistrict)+
             str(x.industryphy)+str(x.industryco)+str(x.opscope)+str(x.enttype)+
             str(x.enttypeitem)+str(x.open_len)+str(x.end_len)+str(x.orgid)+str(x.jobid)+
             str(x.adbusign)+str(x.townsign)+str(x.regtype)+str(x.compform)+str(x.opform)+
             str(x.ptbusscope)+str(x.venind)+str(x.enttypeminu)+str(x.enttypegb)+
             str(x.legal_judgment_num)+str(x.FORINVESTSIGN_ch_2016)+str(x.FORINVESTSIGN_ch_2017)+
             str(x.FORINVESTSIGN_ch_2018),axis=1)
col_name_df=list(df.columns)
# Sentinel meaning "no linked labelled company found" (sic: 'lable' typo is
# kept everywhere for consistency).
df['link_lable']=-99999999.0

len_base2=len(df)

# For every company, look for "linked" companies: a different id with an
# identical essential-feature fingerprint and registered capital within
# (0.33x, 3x) of this row's. If any linked company is labelled, store the
# mean of those labels as a leak-style feature.
# NOTE: this is O(n^2) over the table and can be slow on large data.
for i in range(len_base2):
    fea_key=df['feature_ess'][i]
    id_key=df['id'][i]
    regcap=df['regcap'][i]

    df_tmp=df[(df['feature_ess']==fea_key) & (df['id']!=id_key) ]
    df_tmp=df_tmp[(df_tmp['label']>=0) & (df_tmp['regcap']<3*regcap) &
        (df_tmp['regcap']>0.33*regcap)]

    if len(df_tmp)>0:
        # Fix: use .loc instead of chained indexing (df['link_lable'][i]=...).
        # Chained assignment may write to a temporary copy and is not
        # guaranteed to update df (pandas SettingWithCopy).
        df.loc[i,'link_lable']=df_tmp['label'].mean()
 
# Flag companies whose linked duplicates are all fraudulent (mean label 1)
# or all clean (mean label 0).
df['black_hit']=df.apply(lambda x:1 if x.link_lable==1 else 0,axis=1)
df['white_hit']=df.apply(lambda x:1 if x.link_lable==0 else 0,axis=1)

def black_white(x):
    """Map a link label to a signed vote adjustment.

    1 (all linked companies fraudulent) -> +2, 0 (all clean) -> -2,
    anything else (mixed mean, or the -99999999 sentinel) -> 0.
    """
    adjustments = {1: 2, 0: -2}
    return adjustments.get(x, 0)

# Convert the leaked link label into a vote adjustment and keep only the
# id-level link features for later merging with Model1 predictions.
df['white_black']=df['link_lable'].apply(black_white)
df_black_white=df[['id','link_lable','black_hit','white_hit','white_black','label']]
#------------------------------------------------------------------------------

#------------------------------------------------------------------------------
base_info=pd.read_csv(folderPath+'base_info.csv')# company base information
annual_report_info=pd.read_csv(folderPath+'annual_report_info.csv')# annual report information
tax_info=pd.read_csv(folderPath+'tax_info.csv')# tax payment information
change_info=pd.read_csv(folderPath+'change_info.csv')# registration-change information
news_info=pd.read_csv(folderPath+'news_info.csv')# public-opinion (news) information
other_info=pd.read_csv(folderPath+'other_info.csv')# other information
entprise_info=pd.read_csv(folderPath+'entprise_info.csv')# labelled companies {0: 13884, 1: 981}
entprise_evaluate=pd.read_csv(folderPath+'entprise_evaluate.csv')# unlabelled companies to score

print('Process other tables .............')
#change_info: drop date/detail columns, average the rest per company.
change_info_clean=change_info.drop(['bgrq','bgq','bgh'],axis=1)
change_info_clean = change_info_clean.groupby('id',sort=False).agg('mean')
change_info_clean=pd.DataFrame(change_info_clean).reset_index()
#other_info
# Collapse to one row per company; fill missing counts with -1.
buf_group = other_info.groupby('id',sort=False).agg('mean')
other_info_clean=pd.DataFrame(buf_group).reset_index()
other_info_clean=other_info_clean.fillna(-1)
# NOTE(review): the frame is already unique per id here, so this second
# groupby/mean looks like a no-op -- confirm before removing.
other_info_clean = other_info_clean.groupby('id',sort=False).agg('mean')
other_info_clean=pd.DataFrame(other_info_clean).reset_index()
#news_info: drop the date, encode sentiment, average per company.
news_info_clean=news_info.drop(['public_date'],axis=1)
# Missing sentiment is treated as neutral ("中立").
news_info_clean['positive_negtive']=news_info_clean['positive_negtive'].fillna("中立")
# Label-encode sentiment in order of first appearance.
dic={}
cate=news_info_clean.positive_negtive.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
#
news_info_clean['positive_negtive']=news_info_clean['positive_negtive'].map(dic)
news_info_clean = news_info_clean.groupby('id',sort=False).agg('mean')
news_info_clean=pd.DataFrame(news_info_clean).reset_index()
# Process annual_report_info.
# Drop columns where more than half of the values are null.
annual_report_info_clean=annual_report_info.dropna(thresh=annual_report_info.shape[0]*0.5,how='all',axis=1)
# Encode the business-status column; missing status becomes "无" (none).
annual_report_info_clean['BUSSTNAME']=annual_report_info_clean['BUSSTNAME'].fillna("无")
dic = {'无':-1,'开业':0, '歇业':1, '停业':2, '清算':3}
#
annual_report_info_clean['BUSSTNAME']=annual_report_info_clean['BUSSTNAME'].map(dic)
# Average over all report years to get one row per company.
annual_report_info_clean = annual_report_info_clean.groupby('id',sort=False).agg('mean')
annual_report_info_clean=pd.DataFrame(annual_report_info_clean).reset_index()
# Process the tax table.
tax_info_clean=tax_info.copy()
tax_info_clean['START_DATE']=pd.to_datetime(tax_info_clean['START_DATE'])
tax_info_clean['END_DATE']=pd.to_datetime(tax_info_clean['END_DATE'])
# Length of each taxation period, in whole days.
tax_info_clean['gap_day']=(tax_info_clean['END_DATE']-tax_info_clean['START_DATE']).dt.total_seconds()//3600//24
tax_info_clean=tax_info_clean.drop(['START_DATE','END_DATE'],axis=1)
tax_info_clean['TAX_CATEGORIES']=tax_info_clean['TAX_CATEGORIES'].fillna("无")# 17 unique values
tax_info_clean['TAX_ITEMS']=tax_info_clean['TAX_ITEMS'].fillna("无")# 275 unique values
# Label-encode the object-typed columns in order of first appearance.
dic={}
cate=tax_info_clean.TAX_CATEGORIES.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
tax_info_clean['TAX_CATEGORIES']=tax_info_clean['TAX_CATEGORIES'].map(dic)
#
dic={}
cate=tax_info_clean.TAX_ITEMS.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
tax_info_clean['TAX_ITEMS']=tax_info_clean['TAX_ITEMS'].map(dic)
# Implied income = amount / rate. NOTE(review): a zero TAX_RATE yields
# inf here -- confirm the raw data never contains rate == 0.
tax_info_clean['income']=tax_info_clean['TAX_AMOUNT']/tax_info_clean['TAX_RATE']
# One row per company (mean over all tax records).
tax_info_clean = tax_info_clean.groupby('id',sort=False).agg('mean')
tax_info_clean=pd.DataFrame(tax_info_clean).reset_index()
# Bin the tax amount into (up to) 10 quantile buckets.
tax_info_clean['TAX_AMOUNT']=tax_info_clean['TAX_AMOUNT'].fillna(tax_info_clean['TAX_AMOUNT'].median())
tax_info_clean['bucket_TAX_AMOUNT']=pd.qcut(tax_info_clean['TAX_AMOUNT'], 10, labels=False,duplicates='drop')
print('Process base_info .............')

# ## base_info is the most important table: build cross features and bins.

# # Process base_info.
# Missing end dates are assumed to mean "still open": fill with the max date.
base_info['opto']=pd.to_datetime(base_info['opto']).fillna(pd.to_datetime(base_info['opto']).max())
base_info['opfrom']=pd.to_datetime(base_info['opfrom'])
# Licence duration in whole years.
base_info['gap_year']=(base_info['opto']-base_info['opfrom']).dt.total_seconds()//3600//24//365
base_info_clean=base_info.drop(['opfrom','opto'],axis=1)

#.......................encode object-typed columns...........................
base_info_clean['industryphy']=base_info_clean['industryphy'].fillna("无")
base_info_clean['dom']=base_info_clean['dom'].fillna("无")
base_info_clean['opform']=base_info_clean['opform'].fillna("无")
base_info_clean['oploc']=base_info_clean['oploc'].fillna("无")
# Each categorical column is label-encoded in order of first appearance.
dic={}
cate=base_info_clean.industryphy.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
base_info_clean['industryphy']=base_info_clean['industryphy'].map(dic)
#
dic={}
cate=base_info_clean.dom.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
base_info_clean['dom']=base_info_clean['dom'].map(dic)
#
dic={}
cate=base_info_clean.opform.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
base_info_clean['opform']=base_info_clean['opform'].map(dic)
#
dic={}
cate=base_info_clean.oploc.unique()
for i in range(len(cate)):
    dic[cate[i]]=i
base_info_clean['oploc']=base_info_clean['oploc'].map(dic)
# Sentinel for any remaining missing values.
base_info_clean=base_info_clean.fillna(-1)
#
#........................分箱.................................
def bucket(name,bucket_len):
    """Quantile-bucket column *name* of ``base_info_clean`` into bucket_len bins.

    gap_list holds the 0, 1/bucket_len, ..., 1 quantiles of the column; each
    value is encoded as the index of the first quantile edge that is >= it.

    NOTE: reads the module-level DataFrame ``base_info_clean``.
    """
    gap_list=[base_info_clean[name].quantile(i/bucket_len) for i in range(bucket_len+1)]# quantiles as bin edges
    new_col=[]
    for i in base_info_clean[name].values:
        # Fix: the original left ``encode`` unbound (first row) or carrying
        # the previous row's value when no edge matched -- which happens for
        # NaN values, since every comparison with NaN is False. Default to
        # the last bucket instead.
        encode=len(gap_list)-1
        for j in range(len(gap_list)):
            if gap_list[j]>=i:
                encode=j
                break
        new_col.append(encode)
    return new_col
# Registered capital minus paid-in capital.
base_info_clean['regcap_reccap']=base_info_clean['regcap']-base_info_clean['reccap']
# Bin registered capital into (up to) 10 quantile buckets.
base_info_clean['regcap']=base_info_clean['regcap'].fillna(base_info_clean['regcap'].median())
base_info_clean['bucket_regcap']=pd.qcut(base_info_clean['regcap'], 10, labels=False,duplicates='drop')
# Bin paid-in capital.
base_info_clean['reccap']=base_info_clean['reccap'].fillna(base_info_clean['reccap'].median())
base_info_clean['bucket_reccap']=pd.qcut(base_info_clean['reccap'], 10, labels=False,duplicates='drop')
# Bin the registered-minus-paid-in difference.
base_info_clean['regcap_reccap']=base_info_clean['regcap_reccap'].fillna(base_info_clean['regcap_reccap'].median())
base_info_clean['bucket_regcap_reccap']=pd.qcut(base_info_clean['regcap_reccap'], 10, labels=False,duplicates='drop')
#.............................交叉.........................
#作两个特征的交叉
def cross_two(name_1,name_2):
    """Label-encode the pairwise combination of two base_info_clean columns.

    Each distinct (name_1, name_2) value pair receives a fresh integer code
    in order of first appearance; returns the per-row list of codes.

    NOTE: reads the module-level DataFrame ``base_info_clean``.
    """
    codes={}
    encoded_rows=[]
    left=base_info_clean[name_1]
    right=base_info_clean[name_2]
    for idx in tqdm(range(len(left))):
        key=str(left[idx])+'_'+str(right[idx])
        # setdefault assigns the next unused code (== current dict size)
        # the first time a pair is seen, and reuses it afterwards.
        encoded_rows.append(codes.setdefault(key,len(codes)))
    return encoded_rows
# Cross feature: enterprise type x enterprise sub-type.
base_info_clean['enttypegb']=base_info_clean['enttypegb'].fillna("无")
base_info_clean['enttypeitem']=base_info_clean['enttypeitem'].fillna("无")
new_col=cross_two('enttypegb','enttypeitem')
base_info_clean['enttypegb_enttypeitem']=new_col
#
# Cross feature: industry category x industry sub-category.
base_info_clean['industryphy']=base_info_clean['industryphy'].fillna("无")
base_info_clean['industryco']=base_info_clean['industryco'].fillna("无")
new_col=cross_two('industryphy','industryco')
base_info_clean['industryphy_industryco']=new_col
# Cross feature: enterprise type x industry category.
new_col=cross_two('enttypegb','industryphy')
base_info_clean['enttypegb_industryphy']=new_col
# Cross feature: industry category x enterprise sub-type.
new_col=cross_two('industryphy','enttypeitem')
base_info_clean['industryphy_enttypeitem']=new_col
# Cross feature: industry sub-category x enterprise sub-type.
new_col=cross_two('industryco','enttypeitem')
base_info_clean['industryco_enttypeitem']=new_col

# Second-order cross: (type x sub-type) x (industry x sub-industry).
new_col=cross_two('enttypegb_enttypeitem','industryphy_industryco')
base_info_clean['enttypegb_enttypeitem_industryphy_industryco']=new_col


# ## Categorical features, listed explicitly for later int casting.
# NOTE(review): 'industryphy' appears twice in this list -- harmless for the
# astype(int) below, but probably unintended.
cat_features=['industryphy','dom','opform','oploc','bucket_regcap',
              'bucket_reccap','bucket_regcap_reccap',
              'enttypegb','enttypeitem','enttypegb_enttypeitem',
              'enttypegb_industryphy','enttypegb_enttypeitem_industryphy_industryco',
              'industryphy','industryco','industryphy_industryco',
              'industryphy_enttypeitem','industryco_enttypeitem',
              'adbusign','townsign','regtype','TAX_CATEGORIES','bucket_TAX_AMOUNT',
              'legal_judgment_num','brand_num','patent_num','positive_negtive'
             ]

# Outer-join all cleaned tables into one feature matrix per company id.
all_data=base_info_clean.merge(annual_report_info_clean,how='outer')
all_data=all_data.merge(tax_info_clean,how='outer')
all_data=all_data.merge(change_info_clean,how='outer')
all_data=all_data.merge(news_info_clean,how='outer')
all_data=all_data.merge(other_info_clean,how='outer')
all_data=all_data.fillna(-1)
all_data[cat_features]=all_data[cat_features].astype(int)
print('Dealing with opscope feature.................')
# Process the opscope (business scope) free-text column.
df=all_data
# Strip the boilerplate clause about government-approved projects.
df['opscope1']=df['opscope'].apply(lambda x:x.replace('（依法须经批准的项目，经相关部门批准后方可开展经营活动）',''))
# Chinese stopword list, one word per line.
stopword_list = [k.strip() for k in open(userfolderPath+'cn_stopwords.txt', encoding='utf8').readlines() if k.strip() != '']

# Concatenate every cleaned business-scope string into one corpus for
# keyword extraction. str.join is linear in the total length; the original
# row-by-row += loop built a new string each iteration (quadratic).
str_tmp=''.join(df['opscope1'])
# Extra punctuation treated as stopwords.
stopword_list=stopword_list+['）','（','(',')','*','【','】',' ','，',',']
words_all=[i for i in jieba.cut(str_tmp) if i not in stopword_list]
# Keywords = top-300 TF-IDF terms (jieba) union top-300 raw word frequencies.
tags1 = jieba.analyse.extract_tags(str_tmp, topK=300)
corpus = pd.DataFrame(words_all, columns=['word'])
corpus['cnt'] = 1
word_freq = corpus.groupby(['word']).agg({'cnt': 'count'}).sort_values('cnt', ascending=False)
word_freq['word']=word_freq.index
tags2=list(word_freq.head(300)['word'])
tags=list(set(tags1+tags2))
# One substring-count feature per keyword, column named by the word's pinyin.
for i in tags:
    col_name='cnt_'+pinyin(i)
    df[col_name]=df['opscope1'].apply(lambda x:countX(x, i))
#df=df.fillna(-99999999)
all_data=df.drop(['opscope','opscope1','ptbusscope','midpreindcode'],axis=1)

#
# Split into labelled training rows and the evaluation rows to be scored.
train_df=all_data.merge(entprise_info)
test_df=all_data[all_data['id'].isin(entprise_evaluate['id'].unique().tolist())]

#train_df.to_csv(folderPath+'train1.csv',index=False)
#test_df.to_csv(folderPath+'test1.csv',index=False)
rescale_features = {u'cnt_weixiu': u'AVGSTD', u'cnt_baozhuang': u'AVGSTD', u'cnt_dongman': u'AVGSTD', u'cnt_guanli': u'AVGSTD', u'cnt_fuzhuangxiemao': u'AVGSTD', u'enttypegb': u'AVGSTD', u'cnt_jisuanji': u'AVGSTD', u'cnt_chengshi': u'AVGSTD', u'cnt_jingji': u'AVGSTD', u'cnt_shiye': u'AVGSTD', u'cnt_qingxi': u'AVGSTD', u'cnt_baoyang': u'AVGSTD', u'cnt_fabu': u'AVGSTD', u'industryphy': u'AVGSTD', u'cnt_chuli': u'AVGSTD', u'cnt_jingyingxing': u'AVGSTD', u'townsign': u'AVGSTD', u'cnt_jianzhugongcheng': u'AVGSTD', u'ANCHEYEAR': u'AVGSTD', u'cnt_yewu': u'AVGSTD', u'cnt_dianzi': u'AVGSTD', u'cnt_xiaoshou': u'AVGSTD', u'cnt_xiangbao': u'AVGSTD', u'cnt_jianzhucailiao': u'AVGSTD', u'cnt_kaifa': u'AVGSTD', u'cnt_jishuzixun': u'AVGSTD', u'cnt_huodong': u'AVGSTD', u'cnt_jianzhuzhuangxiu': u'AVGSTD', u'UNENUM': u'AVGSTD', u'cnt_shexiang': u'AVGSTD', u'COLGRANUM': u'AVGSTD', u'bucket_regcap_reccap': u'AVGSTD', u'patent_num': u'AVGSTD', u'cnt_miaomu': u'AVGSTD', u'cnt_lingpeijian': u'AVGSTD', u'cnt_jiaoxue': u'AVGSTD', u'cnt_jishu': u'AVGSTD', u'cnt_meijia': u'AVGSTD', u'cnt_shangpin': u'AVGSTD', u'cnt_laji': u'AVGSTD', u'cnt_lingbujian': u'AVGSTD', u'cnt_yongpin': u'AVGSTD', u'cnt_congshi': u'AVGSTD', u'cnt_tezhongshebei': u'AVGSTD', u'TAX_RATE': u'AVGSTD', u'cnt_peixun': u'AVGSTD', u'cnt_zhizuo': u'AVGSTD', u'cnt_yishujiaoliu': u'AVGSTD', u'TAX_AMOUNT': u'AVGSTD', u'cnt_cangchu': u'AVGSTD', u'regcap': u'AVGSTD', u'cnt_caiwuzixun': u'AVGSTD', u'industryco_enttypeitem': u'AVGSTD', u'cnt_qihuo': u'AVGSTD', u'cnt_cehua': u'AVGSTD', u'enttypeminu': u'AVGSTD', u'gap_day': u'AVGSTD', u'cnt_shichang': u'AVGSTD', u'cnt_wenhuayongpin': u'AVGSTD', u'cnt_fuzhuang': u'AVGSTD', u'cnt_minbanjiaoyu': u'AVGSTD', u'cnt_houfang': u'AVGSTD', u'cnt_zuzhi': u'AVGSTD', u'cnt_guonei': u'AVGSTD', u'cnt_wujinjiaodian': u'AVGSTD', u'cnt_wutai': u'AVGSTD', u'cnt_xin': u'AVGSTD', u'cnt_tushifang': u'AVGSTD', u'cnt_jishukaifa': u'AVGSTD', u'cnt_xu': u'AVGSTD', u'cnt_jinshucailiao': u'AVGSTD', u'cnt_huizhan': 
u'AVGSTD', u'cnt_suliaozhipin': u'AVGSTD', u'cnt_gongchengshigong': u'AVGSTD', u'cnt_baojie': u'AVGSTD', u'jobid': u'AVGSTD', u'cnt_tigong': u'AVGSTD', u'adbusign': u'AVGSTD', u'cnt_shangwang': u'AVGSTD', u'cnt_jinzhi': u'AVGSTD', u'cnt_xianding': u'AVGSTD', u'congro': u'AVGSTD', u'cnt_nongchanpin': u'AVGSTD', u'orgid': u'AVGSTD', u'cnt_yingshi': u'AVGSTD', u'cnt_fuzhu': u'AVGSTD', u'WEBSITSIGN': u'AVGSTD', u'cnt_lvhua': u'AVGSTD', u'cnt_ziying': u'AVGSTD', u'cnt_anzhuang': u'AVGSTD', u'cnt_gongsi': u'AVGSTD', u'cnt_bude': u'AVGSTD', u'cnt_tuwen': u'AVGSTD', u'cnt_gongyong': u'AVGSTD', u'cnt_zhuanghuang': u'AVGSTD', u'cnt_muju': u'AVGSTD', u'enttypegb_enttypeitem_industryphy_industryco': u'AVGSTD', u'cnt_yue': u'AVGSTD', u'cnt_peijian': u'AVGSTD', u'cnt_weixian': u'AVGSTD', u'cnt_jiancai': u'AVGSTD', u'cnt_weishengyongpin': u'AVGSTD', u'cnt_tiyu': u'AVGSTD', u'TAX_ITEMS': u'AVGSTD', u'cnt_wanju': u'AVGSTD', u'cnt_qiye': u'AVGSTD', u'cnt_shouxu': u'AVGSTD', u'cnt_yiqiyibiao': u'AVGSTD', u'STOCKTRANSIGN': u'AVGSTD', u'cnt_qiyexingxiang': u'AVGSTD', u'COLEMPLNUM': u'AVGSTD', u'cnt_jiajuyongpin': u'AVGSTD', u'cnt_jixie': u'AVGSTD', u'regtype': u'AVGSTD', u'cnt_huoyun': u'AVGSTD', u'cnt_gongguanhuodong': u'AVGSTD', u'cnt_qichepeijian': u'AVGSTD', u'cnt_muying': u'AVGSTD', u'cnt_riyongpin': u'AVGSTD', u'cnt_yule': u'AVGSTD', u'legal_judgment_num': u'AVGSTD', u'cnt_yanchu': u'AVGSTD', u'cnt_yuanlinjingguan': u'AVGSTD', u'cnt_huowu': u'AVGSTD', u'cnt_cailiao': u'AVGSTD', u'cnt_zichan': u'AVGSTD', u'cnt_jishutuiguang': u'AVGSTD', u'cnt_qipai': u'AVGSTD', u'cnt_dailifuwu': u'AVGSTD', u'cnt_huishou': u'AVGSTD', u'cnt_zhinenghua': u'AVGSTD', u'cnt_jinrong': u'AVGSTD', u'TAX_CATEGORIES': u'AVGSTD', u'cnt_huanjing': u'AVGSTD', u'cnt_yunshu': u'AVGSTD', u'bgxmdm': u'AVGSTD', u'cnt_anmo': u'AVGSTD', u'cnt_guanggao': u'AVGSTD', u'industryphy_enttypeitem': u'AVGSTD', u'enttypegb_enttypeitem': u'AVGSTD', u'cnt_huiyi': u'AVGSTD', u'cnt_touzi': u'AVGSTD', u'oploc': u'AVGSTD', 
u'bucket_reccap': u'AVGSTD', u'cnt_jishuzhuanrang': u'AVGSTD', u'cnt_jieneng': u'AVGSTD', u'RETEMPLNUM': u'AVGSTD', u'industryphy_industryco': u'AVGSTD', u'cnt_tiyuyongpin': u'AVGSTD', u'cnt_dianzishangwu': u'AVGSTD', u'cnt_xiemao': u'AVGSTD', u'cnt_haocai': u'AVGSTD', u'cnt_jicheng': u'AVGSTD', u'cnt_shengwu': u'AVGSTD', u'cnt_yu': u'AVGSTD', u'cnt_zhaoming': u'AVGSTD', u'forregcap': u'AVGSTD', u'cnt_jisuanjixinxi': u'AVGSTD', u'cnt_bangongyongpin': u'AVGSTD', u'cnt_tongxunqicai': u'AVGSTD', u'cnt_meiti': u'AVGSTD', u'cnt_pizhun': u'AVGSTD', u'cnt_ruanyingjian': u'AVGSTD', u'cnt_cunkuan': u'AVGSTD', u'cnt_wuyeguanli': u'AVGSTD', u'cnt_ershouche': u'AVGSTD', u'cnt_wenhua': u'AVGSTD', u'cnt_zhenfangzhipin': u'AVGSTD', u'cnt_yanfa': u'AVGSTD', u'cnt_gongye': u'AVGSTD', u'cnt_hunqing': u'AVGSTD', u'cnt_gongchengsheji': u'AVGSTD', u'DISPERNUM': u'AVGSTD', u'cnt_daike': u'AVGSTD', u'cnt_chuwai': u'AVGSTD', u'cnt_jianshen': u'AVGSTD', u'cnt_taiyangneng': u'AVGSTD', u'cnt_meifa': u'AVGSTD', u'cnt_danbao': u'AVGSTD', u'cnt_jisuanjixitong': u'AVGSTD', u'dom': u'AVGSTD', u'cnt_shebei': u'AVGSTD', u'cnt_zhizao': u'AVGSTD', u'cnt_zhineng': u'AVGSTD', u'cnt_zizhi': u'AVGSTD', u'cnt_sheji': u'AVGSTD', u'cnt_qiche': u'AVGSTD', u'UNEEMPLNUM': u'AVGSTD', u'cnt_biaopai': u'AVGSTD', u'cnt_qingjieyongpin': u'AVGSTD', u'cnt_nongye': u'AVGSTD', u'cnt_putong': u'AVGSTD', u'cnt_huagongchanpin': u'AVGSTD', u'cnt_meirong': u'AVGSTD', u'cnt_jiazheng': u'AVGSTD', u'cnt_daolu': u'AVGSTD', u'cnt_shizheng': u'AVGSTD', u'cnt_kaizhan': u'AVGSTD', u'cnt_huanbao': u'AVGSTD', u'cnt_shangwuxinxi': u'AVGSTD', u'cnt_zhuanye': u'AVGSTD', u'cnt_jiaju': u'AVGSTD', u'cnt_jiankang': u'AVGSTD', u'cnt_LED': u'AVGSTD', u'cnt_zhanshi': u'AVGSTD', u'cnt_jiankong': u'AVGSTD', u'cnt_yishang': u'AVGSTD', u'cnt_canyin': u'AVGSTD', u'cnt_jingying': u'AVGSTD', u'cnt_zixunfuwu': u'AVGSTD', u'empnum': u'AVGSTD', u'cnt_gongyilipin': u'AVGSTD', u'cnt_gangjiegou': u'AVGSTD', u'cnt_shineiwai': u'AVGSTD', u'RETSOLNUM': 
u'AVGSTD', u'cnt_tongxin': u'AVGSTD', u'cnt_jianshenqicai': u'AVGSTD', u'cnt_zidonghua': u'AVGSTD', u'venind': u'AVGSTD', u'cnt_jingyingfanwei': u'AVGSTD', u'cnt_guquan': u'AVGSTD', u'cnt_qicheyongpin': u'AVGSTD', u'cnt_liyi': u'AVGSTD', u'cnt_nian': u'AVGSTD', u'cnt_laobaoyongpin': u'AVGSTD', u'cnt_shouhoufuwu': u'AVGSTD', u'cnt_fangwu': u'AVGSTD', u'cnt_jiayongdianqi': u'AVGSTD', u'cnt_jiaoyu': u'AVGSTD', u'cnt_xingxiangsheji': u'AVGSTD', u'enttypegb_industryphy': u'AVGSTD', u'cnt_shineizhuangshi': u'AVGSTD', u'cnt_paiqian': u'AVGSTD', u'cnt_zhongzhi': u'AVGSTD', u'cnt_wenwu': u'AVGSTD', u'cnt_yunying': u'AVGSTD', u'cnt_dianzichanpin': u'AVGSTD', u'cnt_yanghu': u'AVGSTD', u'cnt_yifa': u'AVGSTD', u'cnt_wangzhan': u'AVGSTD', u'cnt_chuangyetouzi': u'AVGSTD', u'cnt_jixieshebei': u'AVGSTD', u'cnt_zhuangshi': u'AVGSTD', u'cnt_guanggaosheji': u'AVGSTD', u'cnt_bangong': u'AVGSTD', u'state': u'AVGSTD', u'cnt_yanjiu': u'AVGSTD', u'cnt_zhanlan': u'AVGSTD', u'cnt_jianguanbumen': u'AVGSTD', u'cnt_kejilingyu': u'AVGSTD', u'cnt_yuanlinlvhua': u'AVGSTD', u'bucket_regcap': u'AVGSTD', u'cnt_huahui': u'AVGSTD', u'cnt_shengchan': u'AVGSTD', u'cnt_jiaoliu': u'AVGSTD', u'cnt_jinchukou': u'AVGSTD', u'cnt_daili': u'AVGSTD', u'TAXATION_BASIS': u'AVGSTD', u'cnt_weixianpin': u'AVGSTD', u'cnt_lingshou': u'AVGSTD', u'cnt_lvyou': u'AVGSTD', u'DISEMPLNUM': u'AVGSTD', u'STATE': u'AVGSTD', u'cnt_zixun': u'AVGSTD', u'cnt_wenyichuangzuo': u'AVGSTD', u'cnt_qianzhi': u'AVGSTD', u'cnt_shipin': u'AVGSTD', u'cnt_tuiguang': u'AVGSTD', u'cnt_xuke': u'AVGSTD', u'cnt_xingzheng': u'AVGSTD', u'cnt_weijing': u'AVGSTD', u'forreccap': u'AVGSTD', u'cnt_fuwu': u'AVGSTD', u'cnt_yinglixing': u'AVGSTD', u'cnt_zhili': u'AVGSTD', u'cnt_yiliaoqixie': u'AVGSTD', u'cnt_xinnengyuan': u'AVGSTD', u'cnt_weihuapin': u'AVGSTD', u'cnt_laowu': u'AVGSTD', u'cnt_shigong': u'AVGSTD', u'cnt_shumachanpin': u'AVGSTD', u'cnt_yiliao': u'AVGSTD', u'brand_num': u'AVGSTD', u'cnt_jiance': u'AVGSTD', u'cnt_bao': u'AVGSTD', u'cnt_zulin': 
u'AVGSTD', u'cnt_hulianwang': u'AVGSTD', u'DEDUCTION': u'AVGSTD', u'cnt_ruanjiankaifa': u'AVGSTD', u'positive_negtive': u'AVGSTD', u'cnt_keji': u'AVGSTD', u'cnt_huazhuangpin': u'AVGSTD', u'exenum': u'AVGSTD', u'gap_year': u'AVGSTD', u'cnt_wangluo': u'AVGSTD', u'cnt_zuyu': u'AVGSTD', u'cnt_jidianshebei': u'AVGSTD', u'cnt_erlei': u'AVGSTD', u'parnum': u'AVGSTD', u'cnt_ruanjian': u'AVGSTD', u'enttypeitem': u'AVGSTD', u'cnt_dianxiandianlan': u'AVGSTD', u'protype': u'AVGSTD', u'cnt_xitong': u'AVGSTD', u'cnt_yingxiaocehua': u'AVGSTD', u'cnt_guojia': u'AVGSTD', u'cnt_guangfu': u'AVGSTD', u'cnt_gelei': u'AVGSTD', u'cnt_daiban': u'AVGSTD', u'cnt_baojianshipin': u'AVGSTD', u'cnt_jianshe': u'AVGSTD', u'cnt_jinrongfuwu': u'AVGSTD', u'cnt_diannao': u'AVGSTD', u'cnt_jidongche': u'AVGSTD', u'cnt_jisuanjiwangluo': u'AVGSTD', u'cnt_chanpin': u'AVGSTD', u'cnt_yiyao': u'AVGSTD', u'cnt_anfang': u'AVGSTD', u'cnt_qingjie': u'AVGSTD', u'cnt_zhengquan': u'AVGSTD', u'cnt_xiangmu': u'AVGSTD', u'cnt_wangluokeji': u'AVGSTD', u'cnt_shenpi': u'AVGSTD', u'cnt_huaxuepin': u'AVGSTD', u'cnt_jiagong': u'AVGSTD', u'cnt_chengban': u'AVGSTD', u'cnt_wangshang': u'AVGSTD', u'cnt_gongcheng': u'AVGSTD', u'cnt_jisuanjiruanjian': u'AVGSTD', u'reccap': u'AVGSTD', u'cnt_xinxijishu': u'AVGSTD', u'cnt_xiangguan': u'AVGSTD', u'cnt_han': u'AVGSTD', u'cnt_zhipin': u'AVGSTD', u'cnt_fangdichan': u'AVGSTD', u'cnt_licai': u'AVGSTD', u'cnt_gongzhong': u'AVGSTD', u'cnt_guijinshu': u'AVGSTD', u'cnt_riyongbaihuo': u'AVGSTD', u'regcap_reccap': u'AVGSTD', u'FORINVESTSIGN': u'AVGSTD', u'cnt_yishu': u'AVGSTD', u'cnt_weixiufuwu': u'AVGSTD', u'cnt_zhuangxiu': u'AVGSTD', u'BUSSTNAME': u'AVGSTD', u'cnt_xujing': u'AVGSTD', u'cnt_zhuangxie': u'AVGSTD', u'cnt_wujin': u'AVGSTD', u'cnt_tiyusaishi': u'AVGSTD', u'cnt_sheying': u'AVGSTD', u'cnt_shenghuo': u'AVGSTD', u'cnt_sheshi': u'AVGSTD', u'cnt_jiqiren': u'AVGSTD', u'cnt_rongzi': u'AVGSTD', u'cnt_gongyipin': u'AVGSTD', u'cnt_pifa': u'AVGSTD', u'cnt_jingnei': u'AVGSTD', u'cnt_zhongjie': 
u'AVGSTD', u'cnt_huiwu': u'AVGSTD', u'cnt_ruodian': u'AVGSTD', u'cnt_jianzhu': u'AVGSTD', u'cnt_bumen': u'AVGSTD', u'cnt_xukezheng': u'AVGSTD', u'cnt_qicai': u'AVGSTD', u'cnt_fei': u'AVGSTD', u'cnt_pingmiansheji': u'AVGSTD', u'oplocdistrict': u'AVGSTD', u'industryco': u'AVGSTD', u'cnt_yuanqijian': u'AVGSTD', u'cnt_bangongshebei': u'AVGSTD', u'cnt_xinxi': u'AVGSTD', u'cnt_lingyu': u'AVGSTD', u'cnt_jidian': u'AVGSTD', u'cnt_wai': u'AVGSTD', u'EMPNUMSIGN': u'AVGSTD', u'cnt_jingyingxiangmu': u'AVGSTD', u'enttype': u'AVGSTD', u'PUBSTATE': u'AVGSTD', u'income': u'AVGSTD', u'cnt_tongxunshebei': u'AVGSTD', u'cnt_dianying': u'AVGSTD', u'bucket_TAX_AMOUNT': u'AVGSTD', u'compform': u'AVGSTD', u'cnt_shinei': u'AVGSTD', u'cnt_jian': u'AVGSTD', u'EMPNUM': u'AVGSTD', u'opform': u'AVGSTD', u'cnt_zhenduan': u'AVGSTD', u'cnt_diaocha': u'AVGSTD', u'cnt_lifa': u'AVGSTD', u'cnt_xiyu': u'AVGSTD', u'cnt_shuidian': u'AVGSTD', u'cnt_weihu': u'AVGSTD', u'cnt_dianxinyewu': u'AVGSTD', u'cnt_fanwei': u'AVGSTD', u'cnt_guandao': u'AVGSTD'}
# Rescale the features listed in rescale_features, using statistics computed
# on the training split only (applied to both train and test).
for (feature_name, rescale_method) in rescale_features.items():
    if rescale_method == 'MINMAX':
        _min = train_df[feature_name].min()
        _max = train_df[feature_name].max()
        scale = _max - _min
        shift = _min
    else:
        # 'AVGSTD': z-score standardisation.
        shift = train_df[feature_name].mean()
        scale = train_df[feature_name].std()
    if scale == 0.:
        # Constant feature: no information for the model, drop it everywhere.
        del train_df[feature_name]
        del test_df[feature_name]
        print ('Feature %s was dropped because it has no variance' % feature_name)
    else:
        train_df[feature_name] = (train_df[feature_name] - shift).astype(np.float64) / scale
        test_df[feature_name] = (test_df[feature_name] - shift).astype(np.float64) / scale

# Hold out 15% of the labelled data (fixed seed for reproducibility).
# NOTE(review): the split is not stratified even though labels are highly
# imbalanced (~981 positives vs ~13884 negatives) -- confirm intent.
train_df_85, test_df_15 = train_test_split(train_df, train_size=0.85, random_state=1337, shuffle=True)

print('Training Model1.................')
x_train=train_df_85.drop(['id','label'],axis=1)
y_train=train_df_85['label']
test_data=test_df.drop(['id'],axis=1)

# Five base classifiers for a hard-voting ensemble, all with a fixed seed.
# XGBoost: shallow ensemble (22 trees) with row/column subsampling.
xlf = xgb.XGBClassifier(max_depth=10,
                    learning_rate=0.1,
                    gamma=0.0,
                    min_child_weight=0.0,
                    max_delta_step=0.0,
                    subsample=0.8,
                    colsample_bytree=0.8,
                    colsample_bylevel=1.0,
                    reg_alpha=0.0,
                    reg_lambda=1.0,
                    n_estimators=22,
                    nthread=4,
                    scale_pos_weight=1.0,
                    base_score=0.5,
                    seed=1337,
                    missing=None,
                    random_state=1337
                  )

# LightGBM binary classifier.
llf=lgb.LGBMClassifier(objective='binary',
                        learning_rate=0.1,
                        num_leaves=500,
                        max_depth=10,
                        n_estimators=300,
                        reg_lambda=1,
                        n_jobs=8,
                        metric='binary_logloss',
                        random_state=1337)
  
# CatBoost; train_dir redirects its log/artifact output to user_data.
clf=cab.CatBoostClassifier(n_estimators=100
                              ,learning_rate=0.1
                              ,max_depth=10
                              ,reg_lambda=1
                              #,subsample=0.8
                              ,silent=True
                              ,task_type='CPU'
                              ,train_dir=userfolderPath
                              ,random_state=1337)

# Random forest.
rf = RandomForestClassifier(random_state=1337,max_features=0.8,
            n_estimators=100,max_depth=25,min_samples_leaf=3,
            min_samples_split=9,bootstrap=True)

# Gradient boosting (sklearn).
gbt = GradientBoostingClassifier(loss='deviance', n_estimators=100,max_features=0.8,max_depth=20, min_samples_leaf=3, learning_rate=0.1,random_state=1337)

# Fit all five models on the 85% training split.
xlf.fit(x_train, y_train)
print('xlf ready.................')
llf.fit(x_train, y_train)
print('llf ready.................')
clf.fit(x_train, y_train)
print('clf ready.................')
rf.fit(x_train, y_train)
print('rf ready.................')
gbt.fit(x_train, y_train)
print('gbt ready.................')

# Hard (0/1) predictions from each model on the evaluation set.
result_xgb = xlf.predict(test_data)
result_lgb = llf.predict(test_data)
result_cab = clf.predict(test_data)
result_rf = rf.predict(test_data)
result_gbt = gbt.predict(test_data)

test_df['score1']=result_xgb
test_df['score2']=result_lgb
test_df['score3']=result_cab
test_df['score4']=result_rf
test_df['score5']=result_gbt
# Majority vote: average of the five binary predictions (in [0, 1]).
test_df['score_vote']=test_df.apply(lambda x:0.2*(x.score1+x.score2+x.score3+x.score4+x.score5),axis=1)

vote_result=test_df[['id', 'score_vote']]
# Adjust the vote with the duplicate-company black/white link feature
# (+2 for known-fraud links, -2 for known-clean links).
vote_result=pd.merge(vote_result,df_black_white,how='left',on='id')
vote_result['score_vote']=vote_result['score_vote']+vote_result['white_black']

print('Model1 ready.................')
print('f1 for Model1 = 0.8467')

def filter_col_by_nan(df, ratio=0.05):
    """Return the columns of ``df`` that are almost entirely NaN.

    A column is flagged when its NaN fraction is >= ``1 - ratio``; e.g.
    ``ratio=0.01`` flags columns that are at least 99% missing, so the
    caller can drop them before modelling.

    Parameters
    ----------
    df : pandas.DataFrame
        Table to inspect.
    ratio : float, optional
        Maximum tolerated share of non-null values (default 0.05).

    Returns
    -------
    list
        Column labels to drop, in ``df.columns`` order.
    """
    # Comprehension replaces the manual append loop; threshold hoisted out.
    threshold = 1 - ratio
    return [col for col in df.columns if df[col].isna().mean() >= threshold]

print('Model2 and Model3 data preparation begins.................')
print('Process other tables .............')
# Load every raw competition table; one CSV per information source.
base_info=pd.read_csv(folderPath+'base_info.csv')# basic company information
annual_report_info=pd.read_csv(folderPath+'annual_report_info.csv')# annual-report basics per company
tax_info=pd.read_csv(folderPath+'tax_info.csv')# tax payment records
change_info=pd.read_csv(folderPath+'change_info.csv')# registration change records
news_info=pd.read_csv(folderPath+'news_info.csv')# news / public-opinion records
other_info=pd.read_csv(folderPath+'other_info.csv')# miscellaneous counts (judgments, brands, patents)
entprise_info=pd.read_csv(folderPath+'entprise_info.csv')# labelled companies {0: 13884, 1: 981}
entprise_evaluate=pd.read_csv(folderPath+'entprise_evaluate.csv')# unlabelled companies to score
# Drop columns that are >= 99% NaN in the two widest tables.
base_info = base_info.drop(filter_col_by_nan(base_info, 0.01), axis=1)
annual_report_info = annual_report_info.drop(filter_col_by_nan(annual_report_info, 0.01), axis=1)
# Keep one row per company, then summarise the three count columns:
# their total and how many of them are missing.
other_info = other_info[~other_info['id'].duplicated()]
other_info['other_SUM'] = other_info[['legal_judgment_num', 'brand_num', 'patent_num']].sum(1)
other_info['other_NULL_SUM'] = other_info[['legal_judgment_num', 'brand_num', 'patent_num']].isnull().astype(int).sum(1)
# News features: convert publish dates to "days before 2020-11-21";
# values without a '-' are treated as unparseable and dropped to NaN.
news_info['public_date'] = news_info['public_date'].apply(lambda x: x if '-' in str(x) else np.nan)
news_info['public_date'] = pd.to_datetime(news_info['public_date'])
current = pd.to_datetime('11/21/20')
news_info['public_date'] = (current - news_info['public_date']).dt.days
# Per-company article count and age statistics.
news_info_df = news_info.groupby('id').agg({'public_date': ['count','max','min','mean']}).reset_index()
news_info_df.columns = ['id', 'public_date_COUNT', 'public_MAX', 'public_MIN', 'public_MEAN']
# Article counts per sentiment bucket.
# NOTE(review): the hard rename assumes `positive_negtive` always has exactly
# three levels, and the merge below uses the default inner join — confirm
# both hold for this dataset.
news_info_df2 = pd.pivot_table(news_info, index='id', columns='positive_negtive', aggfunc='count').reset_index()
news_info_df2.columns = ['id', 'news_COUNT1', 'news_COUNT2', 'news_COUNT3']
news_info_df = pd.merge(news_info_df, news_info_df2)
# Per-company tax aggregates: filing counts plus the spread of tax amounts.
tax_agg_spec = {
    'TAX_CATEGORIES': ['count'],
    'TAX_ITEMS': ['count'],
    'TAXATION_BASIS': ['count'],
    'TAX_AMOUNT': ['max', 'min', 'mean'],
}
tax_info_df = tax_info.groupby('id').agg(tax_agg_spec)
# Flatten the (column, stat) MultiIndex into PREV_<COLUMN>_<STAT> names.
tax_info_df.columns = pd.Index([f'PREV_{field}_{stat.upper()}'
                                for field, stat in tax_info_df.columns])
tax_info_df = tax_info_df.reset_index()

# Coarsen the change date by dropping its last 10 digits — presumably
# collapsing a yyyymmdd-style timestamp to a year-level bucket (TODO confirm).
change_info['bgrq'] = (change_info['bgrq'] / 10000000000).astype(int)

# Per-company change-record aggregates: volume and distinct values.
change_info_df = change_info.groupby('id').agg({
    'bgxmdm': ['count', 'nunique'],
    'bgq': ['nunique'],
    'bgh': ['nunique'],
    'bgrq': ['nunique'],
})
# Flatten the (column, stat) MultiIndex into changeinfo_<COLUMN>_<STAT> names.
change_info_df.columns = pd.Index([f'changeinfo_{field}_{stat.upper()}'
                                   for field, stat in change_info_df.columns])
change_info_df = change_info_df.reset_index()
# Per-company annual-report aggregates: latest year/state/funding and
# employment figures.
annual_report_info_df = annual_report_info.groupby('id').agg({
    'ANCHEYEAR': ['max'],
    'STATE': ['max'],
    'FUNDAM': ['max'],
    'EMPNUM': ['max'],
    'UNEEMPLNUM': ['max', 'sum'],
})
# Flatten the (column, stat) MultiIndex into PREV_<COLUMN>_<STAT> names.
annual_report_info_df.columns = pd.Index([f'PREV_{field}_{stat.upper()}'
                                          for field, stat in annual_report_info_df.columns])
annual_report_info_df = annual_report_info_df.reset_index()
print('Process base_info .............')

# Consistency flags: do the 6-character prefixes of orgid / oplocdistrict /
# jobid agree? (Presumably administrative district codes — TODO confirm.)
base_info['district_FLAG1'] = (base_info['orgid'].fillna('').apply(lambda x: str(x)[:6]) == \
    base_info['oplocdistrict'].fillna('').apply(lambda x: str(x)[:6])).astype(int)
base_info['district_FLAG2'] = (base_info['orgid'].fillna('').apply(lambda x: str(x)[:6]) == \
    base_info['jobid'].fillna('').apply(lambda x: str(x)[:6])).astype(int)
base_info['district_FLAG3'] = (base_info['oplocdistrict'].fillna('').apply(lambda x: str(x)[:6]) == \
    base_info['jobid'].fillna('').apply(lambda x: str(x)[:6])).astype(int)

# Headcount features: total of the three staff columns, and how many of
# them are missing per company.
base_info['person_SUM'] = base_info[['empnum', 'parnum', 'exenum']].sum(1)
base_info['person_NULL_SUM'] = base_info[['empnum', 'parnum', 'exenum']].isnull().astype(int).sum(1)

# Business-term features in days, relative to the fixed reference date
# 2020-11-21: licence age (opfrom_TONOW) and licenced duration (opfrom_TIME).
base_info['opfrom'] = pd.to_datetime(base_info['opfrom'])
base_info['opto'] = pd.to_datetime(base_info['opto'])
current = pd.to_datetime('11/21/20')
base_info['opfrom_TONOW'] = (current - base_info['opfrom']).dt.days
base_info['opfrom_TIME'] = (base_info['opto'] - base_info['opfrom']).dt.days

# Rough size of the business scope: normalise tabs/newlines to the Chinese
# comma, then count '、'-separated items.
base_info['opscope_COUNT'] = base_info['opscope'].apply(lambda x: len(x.replace("\t", "，").replace("\n", "，").split('、')))

# High-cardinality categoricals: add a frequency feature per column, then
# pool every level seen fewer than 10 times into a single -1 bucket.
cat_col = ['oplocdistrict', 'industryphy', 'industryco', 'enttype',
           'enttypeitem', 'enttypeminu', 'enttypegb',
           'dom', 'oploc', 'opform']

for col in cat_col:
    level_counts = base_info[col].value_counts()
    # Frequency is computed from the ORIGINAL levels, before pooling.
    base_info[col + '_COUNT'] = base_info[col].map(level_counts)
    for rare_level in level_counts[level_counts < 10].index:
        base_info[col] = base_info[col].replace(rare_level, -1)

# Raw dates and the free-text scope have been converted to features above.
base_info = base_info.drop(['opfrom', 'opto', 'opscope'], axis=1)

# Label-encode the remaining string-valued columns.
for col in ['industryphy', 'dom', 'opform', 'oploc']:
    base_info[col] = pd.factorize(base_info[col])[0]

# Assemble the modelling tables: labelled rows for training, the evaluation
# ids for testing, each enriched with the same per-company aggregates in the
# same left-join order.
aux_frames = (other_info, news_info_df, tax_info_df,
              annual_report_info_df, change_info_df)

train_data = pd.merge(base_info, entprise_info, on='id')
for frame in aux_frames:
    train_data = pd.merge(train_data, frame, on='id', how='left')

entprise_evaluate = entprise_evaluate[['id']]
test_data = pd.merge(base_info, entprise_evaluate, on='id')
for frame in aux_frames:
    test_data = pd.merge(test_data, frame, on='id', how='left')

#train_data.to_csv(folderPath+'train2.csv',index=False)
#test_data.to_csv(folderPath+'test2.csv',index=False)

def eval_score(y_test, y_pre):
    """Per-class F1 for labels 0/1 plus the binary F1 of the positive class."""
    _, _, f_class, _ = precision_recall_fscore_support(
        y_true=y_test, y_pred=y_pre, labels=[0, 1], average=None)
    return {'合法': f_class[0], '违法': f_class[1], 'f1': f1_score(y_test, y_pre)}

def pro_to_label(y_pro):
    """Binarise probabilities with a fixed 0.5 threshold.

    Parameters
    ----------
    y_pro : array-like of float
        Predicted probabilities for the positive class.

    Returns
    -------
    numpy.ndarray of float64
        1.0 where the probability is strictly greater than 0.5, else 0.0.
        Exactly 0.5 maps to 0, matching the original element-wise loop.
    """
    # Vectorised replacement for the manual index loop; also accepts pandas
    # Series regardless of their index (the old y_pro[i] lookup assumed a
    # RangeIndex).
    return (np.asarray(y_pro) > 0.5).astype(float)

def k_fold_serachParmaters(lgbmodel,catmodel,train_val_data,train_val_kind, test_kind):
    """5-fold CV blend of an LGBM and a CatBoost classifier.

    Each fold trains both models, scores the held-out fold with the
    weighted probability blend (a*lgb + b*cat)/2 binarised at 0.5, and
    accumulates the same blend on ``test_kind`` averaged over folds.

    Returns a tuple ``(mean_f1, pred_Test)``: the mean held-out F1 and the
    fold-averaged blended test probabilities.

    NOTE(review): ``test_kind_cat = test_kind`` and ``x_train_cat = x_train``
    (etc.) are aliases, not copies — the fillna/astype calls below mutate the
    caller's ``test_kind`` and the fold slices in place, so the LGBM model
    also sees the -1-filled integer categoricals. Confirm this is intended.
    """
    mean_f1=0
    n_splits=5
    # Blend weights for the LGBM (a) and CatBoost (b) probabilities.
    a=1.0
    b=1.0
    cat_features = ['oplocdistrict', 'industryphy', 'industryco', 'enttype',
           'enttypeitem', 'enttypeminu', 'enttypegb',
          'dom', 'oploc', 'opform']
    
    # CatBoost needs integer categoricals with no NaNs; -1 marks "missing".
    test_kind_cat=test_kind
    test_kind_cat[cat_features]=test_kind_cat[cat_features].fillna(-1)
    test_kind_cat[cat_features]=test_kind_cat[cat_features].astype(int)
    
    sk = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=2020)
    pred_Test = np.zeros(len(test_kind))
    for train, test in sk.split(train_val_data, train_val_kind):
        x_train = train_val_data.iloc[train]
        y_train = train_val_kind.iloc[train]
        x_test = train_val_data.iloc[test]
        y_test = train_val_kind.iloc[test]
        
        # Same fillna/astype preparation for the fold frames (aliased — see
        # the docstring note above).
        x_train_cat=x_train
        y_train_cat=y_train
        x_test_cat=x_test
        x_train_cat[cat_features]=x_train_cat[cat_features].fillna(-1)
        x_test_cat[cat_features]=x_test_cat[cat_features].fillna(-1)
        x_train_cat[cat_features]=x_train_cat[cat_features].astype(int)
        
        catmodel.fit(x_train_cat, y_train_cat)

        # LGBM uses the held-out fold for early stopping.
        lgbmodel.fit(x_train, y_train, 
                  eval_set=[(x_test, y_test)], 
                  categorical_feature = cat_features,
                 early_stopping_rounds=100
                 ,verbose=False)
        
        # Held-out score of the blended, thresholded prediction.
        pred = (a*lgbmodel.predict_proba(x_test)[:, 1]+b*catmodel.predict_proba(x_test_cat)[:, 1])/2
        pred = pro_to_label(pred)
        fper_class = eval_score(y_test,pred)
        
        # Accumulate the fold's test-set blend; dividing by n_splits here
        # yields the across-fold average at the end.
        pred_Test += (a*lgbmodel.predict_proba(test_kind)[:, 1]+b*catmodel.predict_proba(test_kind_cat)[:, 1])/2/n_splits
        mean_f1 += fper_class['f1']/n_splits
    
    print(mean_f1)        
        
    return mean_f1, pred_Test

print('Training Model2 and Model3.................')

# Test-time averaging: 20 LGBM+CatBoost pairs drawn from hand-picked
# hyperparameter sequences; blended test predictions are averaged over all
# pairs, and the per-pair CV F1 scores are collected for reporting.
score_tta = None
score_list = []

tta_fold = 20
rand1 = np.array([7,10,8,8,8,9,10,10,9,7,9,10,9,8,7,6,7,6,7,9])                  # LGBM num_leaves
rand2 = np.array([3,3,2,5,5,3,4,5,2,4,4,4,5,4,3,2,5,3,2,4])                      # LGBM min_child_samples
rand3 = np.array([57,57,63,57,60,61,58,57,60,61,62,56,64,62,62,59,55,61,59,64])  # CatBoost iterations
rand4 = np.array([12,9,10,11,10,10,10,11,10,10,11,11,9,10,10,11,11,10,10,11])    # CatBoost depth

for fold in range(tta_fold):
    lgb_member = lgb.LGBMClassifier(num_leaves=rand1[fold],
                                    min_child_samples=rand2[fold],
                                    max_depth=5,
                                    learning_rate=0.03,
                                    n_estimators=150,
                                    n_jobs=-1)

    cat_member = cab.CatBoostClassifier(iterations=rand3[fold],
                                        learning_rate=0.05,
                                        depth=rand4[fold],
                                        silent=True,
                                        thread_count=8,
                                        train_dir=userfolderPath,
                                        task_type='CPU')

    # Fresh .drop() copies each iteration: the callee mutates its frames
    # in place, so reusing one frame across iterations would differ.
    fold_score, fold_pred = k_fold_serachParmaters(
        lgb_member, cat_member,
        train_data.drop(['id', 'label'], axis=1),
        train_data['label'],
        test_data.drop(['id'], axis=1))

    contribution = fold_pred / tta_fold
    score_tta = contribution if score_tta is None else score_tta + contribution
    score_list.append(fold_score)

print('Average of Model2 and Model3 is ready.')
print('f1 for average of Model2 and Model3 = ', np.array(score_list).mean())

# Final blend: Model1 vote (weight 1) with the TTA average (weight 2),
# thresholded at 0.5 for the submitted label.
test_data['aver_score'] = score_tta
final_result = pd.merge(vote_result, test_data[['id', 'aver_score']], on='id')
blended = (final_result['score_vote'] + 2 * final_result['aver_score']) / 3
final_result['score_pro'] = blended
final_result['score'] = pro_to_label(blended)

final_result[['id', 'score']].to_csv(resultPath + 'result.csv', index=False)
print('Final result is exported successfully.')


