# -*- coding: utf-8 -*-
"""
@author: quincyqiang
@software: PyCharm
@file: gen_feas.py
@time: 2020/9/2 23:36
@description：
"""
import time
import pickle
import multiprocessing
import numpy as np
import pandas as pd
from tqdm import tqdm
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelEncoder
# import nltk
from sklearn.cluster import KMeans
# from nltk.corpus import stopwords
from sklearn.ensemble import RandomForestRegressor
from tqdm import tqdm
import os
import datetime
from tqdm import tqdm
import warnings
import datetime
from sklearn.preprocessing import LabelEncoder

# Silence warnings globally; the ratio/log features built below deliberately
# produce divide-by-zero and invalid-value warnings that would flood stdout.
warnings.filterwarnings("ignore")
# Register tqdm's `progress_apply` on pandas objects.
tqdm.pandas()

print("正在构建特征。。。。")  # "building features..."

def get_le(ser):
    """Ordinal-encode a Series: distinct values (compared as strings) are
    sorted and mapped to 0..k-1 — the same mapping sklearn's LabelEncoder
    produces.

    Parameters
    ----------
    ser : pd.Series
        Column to encode. Cast to str first, so NaN becomes the literal
        string 'nan' and gets its own code.

    Returns
    -------
    np.ndarray
        Integer codes aligned with *ser*.
    """
    # np.unique(return_inverse=True) is exactly fit+transform in one pass:
    # it returns the sorted unique values and each element's index into them.
    _, codes = np.unique(ser.astype(str).to_numpy(), return_inverse=True)
    return codes


# --- Load raw tables -------------------------------------------------------
# train carries labelled enterprise ids; the evaluate file lists the ids to
# score and is renamed so both splits share the (id, label) schema — its
# label column stays NaN and is used later to route rows to the test split.
train = pd.read_csv('../raw_data/train/entprise_info.csv')
test = pd.read_csv('../raw_data/entprise_evaluate.csv')
test.columns = ['id', 'label']
annual_report_info = pd.read_csv('../raw_data/train/annual_report_info.csv')  # id:8397 shape:22550 23
base_info = pd.read_csv('../raw_data/train/base_info.csv')  # id:24865 shape:24865 33
change_info = pd.read_csv('../raw_data/train/change_info.csv')  # id:8726  (45940, 5)
# entprise_info=pd.read_csv('data/train/entprise_info.csv') # 14865 14865 2
news_info = pd.read_csv('../raw_data/train/news_info.csv')  # 927 (10518, 3)
other_info = pd.read_csv('../raw_data/train/other_info.csv')  # 1888 (1890, 4)
tax_info = pd.read_csv('../raw_data/train/tax_info.csv')  # 808 (29195, 9)

# Attach per-enterprise base information to both splits.
train = pd.merge(train, base_info, how='left', on='id')
test = pd.merge(test, base_info, how='left', on='id')

# Encode the annual-report business-status column, then left-join one
# deduplicated snapshot per report year (2018, 2017, 2016) onto both splits.
# Overlapping column names pick up pandas' default _x/_y merge suffixes.
annual_report_info['BUSSTNAME'] = get_le(annual_report_info['BUSSTNAME'])
# annual_report_info_15 = annual_report_info[annual_report_info['ANCHEYEAR'] == 2015]
for year in (2018, 2017, 2016):
    snapshot = annual_report_info[annual_report_info['ANCHEYEAR'] == year]
    snapshot = snapshot.drop_duplicates(subset=['id'])
    train = pd.merge(train, snapshot, on=['id'], how='left')
    test = pd.merge(test, snapshot, on=['id'], how='left')

# Per-enterprise count of annual reports, joined as the 'have' column.
annual_report_info['have'] = 1
annual_report_info_gb = annual_report_info.groupby('id')['have'].count().reset_index()
train = pd.merge(train, annual_report_info_gb.drop_duplicates(subset=['id']), on=['id'], how='left')
test = pd.merge(test, annual_report_info_gb.drop_duplicates(subset=['id']), on=['id'], how='left')

# Stack the splits so feature engineering runs once; remember where to cut.
train_size = len(train)
data = pd.concat([train, test], axis=0).reset_index(drop=True)

# Numeric columns become "amount" features; everything else (object dtype)
# is treated as categorical. id/label are excluded from both lists.
_excluded = ('id', 'label')
numeric_cols = data.select_dtypes(exclude=['object']).columns
amount_feas = [col for col in numeric_cols if col not in _excluded]
category_fea = [col for col in data.columns
                if col not in amount_feas and col not in _excluded]

# ============ pengfei========
# Flag enterprises that appear in the news table. Membership is tested
# against a set — `x in ndarray` would be a linear scan for every row.
news_info_ids = set(news_info['id'].unique())
data['have_news_info'] = data['id'].apply(lambda x: 1 if x in news_info_ids else 0)
# Split news by sentiment: positive / neutral / negative.
news_info_jj = news_info[news_info['positive_negtive'] == '积极']
news_info_zl = news_info[news_info['positive_negtive'] == '中立']
news_info_xj = news_info[news_info['positive_negtive'] == '消极']


# Precompute per-id positive-news counts once; the original refiltered the
# whole positive-news frame for every row (O(rows * news)).
_jj_counts = news_info_jj['id'].value_counts()


def get_jj_count(id):
    """Return how many positive ('积极') news items mention *id* (0 if none)."""
    return int(_jj_counts.get(id, 0))


data['jj_count'] = data['id'].apply(lambda x: get_jj_count(x))
# Sentiment label -> ordinal code.
type_num = {
    '中立': 1,
    '积极': 2,
    '消极': 3,
}
# Normalise relative dates ("...前", i.e. "ago") to the crawl date.
# NOTE(review): assumes public_date has no NaN — `'前' in x` would raise
# TypeError on a float; confirm against the raw csv.
news_info['public_date'] = news_info['public_date'].apply(lambda x: '2020-9-12' if '前' in x else x)

# Last sentiment per id (file order), computed once. groupby preserves
# within-group row order, so .last() matches the original list(...)[-1]
# (except that .last() skips NaN sentiments, where the original would crash).
_last_sentiment = news_info.groupby('id')['positive_negtive'].last()


def get_recent_new(id):
    """Ordinal sentiment code of the id's most recent news item, 0 if none."""
    if id in _last_sentiment.index:
        return type_num[_last_sentiment[id]]
    return 0


data['recent'] = data['id'].apply(lambda x: get_recent_new(x))


# Per-id row count in change_info, precomputed once instead of refiltering
# the frame for every row.
_change_counts = change_info['id'].value_counts()


def get_tax_info_count(id):
    """Return the number of change_info records for *id* (0 if none).

    NOTE(review): despite the name ("tax") and the destination column
    ('news_count'), this counts rows of *change_info*. Kept as-is so the
    generated feature stays identical for downstream models.
    """
    return int(_change_counts.get(id, 0))


data['news_count'] = data['id'].apply(lambda x: get_tax_info_count(x))
# =====================


# Ordinal-encode every categorical column in place (strings -> int codes).
for col in category_fea:
    encoder = LabelEncoder()
    data[col] = encoder.fit_transform(data[col].astype(str))

# All feature columns (numeric + encoded categorical) except id/label.
cat_list = [col for col in data.columns if col not in ('id', 'label')]
# ===================== amount_feas 分箱特征 (binning features) ===============
for col in tqdm(amount_feas, desc="分箱特征"):
    # Equal-width bins: each bin spans 1000 units of the raw value.
    data['{}_bin1'.format(col)] = np.floor_divide(data[col], 1000)
    # Exponential-width bins: one bin per order of magnitude.
    # NOTE(review): log10 of zero/negative values yields -inf/NaN; the
    # warnings are silenced at the top of the file — confirm this is intended.
    data['{}_bin2'.format(col)] = np.floor(np.log10(data[col]))

# ===================== amount_feas 基本聚合特征 ===============
# Per-category summary statistics of each numeric column, broadcast back to
# every row via transform. The groupby is built once per (cate, f) pair
# instead of once per statistic (the original recomputed it five times).
_agg_stats = (('medi', 'median'), ('mean', 'mean'), ('max', 'max'),
              ('min', 'min'), ('std', 'std'))
for f in tqdm(amount_feas, desc="amount_feas 基本聚合特征"):
    for cate in category_fea:
        if f == cate:
            continue
        grouped = data.groupby(cate)[f]
        for suffix, stat in _agg_stats:
            data['{}_{}_{}'.format(cate, f, suffix)] = grouped.transform(stat)

# =================== amount_feas 基本交叉特征  =============================
for f1 in tqdm(amount_feas, desc="amount_feas 基本交叉特征"):
    for f2 in amount_feas:
        if f1 != f2:
            data['{}_{}_ratio'.format(f1, f2)] = data[f1].values / data[f2].values
            data['{}_{}_multi'.format(f1, f2)] = data[f1].values * data[f2].values
            data['{}_{}_add'.format(f1, f2)] = data[f1].values + data[f2].values
            data['{}_{}_diff'.format(f1, f2)] = data[f1].values - data[f2].values

# Row-wise summary statistics across all numeric features.
data['nmean'] = data[amount_feas].mean(axis=1)
data['ntd'] = data[amount_feas].std(axis=1)  # NOTE(review): likely meant 'nstd'; column name kept for compatibility
data['nsum'] = data[amount_feas].sum(axis=1)

# For every ordered pair of categorical columns, count how many distinct
# values of the second occur within each value of the first.
for i in tqdm(category_fea, desc="类别特征nunique特征"):
    for j in category_fea:
        if i == j:
            continue
        data['nuni_{0}_{1}'.format(i, j)] = data[i].map(data.groupby(i)[j].nunique())

# ===================== 五折转化率特征 ====================
# Out-of-fold target-mean ("conversion rate") encoding:
#   - rows get fold 0..4 by position; rows without a label (the test split)
#     are moved to pseudo-fold 5;
#   - for each feature column, a row's encoding is the mean label of its
#     value computed over the training folds *other than its own*, so a row
#     never sees its own label (anti-leakage);
#   - test rows (fold 5) are encoded with the mean over all training folds,
#     since the `fold != 5` filter only excludes themselves.


data['ID'] = data.index
data['fold'] = data['ID'] % 5
data.loc[data['label'].isnull(), 'fold'] = 5
target_feat = []
for i in tqdm(cat_list, desc="5折转化率特征"):
    target_feat.extend([i + '_mean_last_1'])
    data[i + '_mean_last_1'] = None
    for fold in range(6):
        # Map each value of column i to the mean label computed on the other
        # training folds; values unseen there become NaN.
        data.loc[data['fold'] == fold, i + '_mean_last_1'] = data[data['fold'] == fold][i].map(
            data[(data['fold'] != fold) & (data['fold'] != 5)].groupby(i)['label'].mean()
        )
    # Column was initialised with None (object dtype); cast to float.
    data[i + '_mean_last_1'] = data[i + '_mean_last_1'].astype(float)

# Columns that are bookkeeping, not model features.
no_fea = ['id', 'label', 'ID', 'fold',
          ]
features = [fea for fea in data.columns if fea not in no_fea]
# Undo the earlier concat: the first train_size rows are train, the rest test.
train = data[:train_size]
test = data[train_size:]

print(len(features), features)
# Free the concatenated frame before pickling the splits.
del data

train.to_pickle('../user_data/train.pkl')
test.to_pickle('../user_data/test.pkl')

def load_data():
    """Return (train frame, train labels, test frame, feature column names).

    Reads the module-level globals built above, so importing this module
    runs the whole feature pipeline first.
    """
    return train, train['label'], test, features



print("构建特征完毕")  # "feature construction finished"
