
# coding: utf-8



# Load the raw tables and attach the fraud tag to the train sets.
import warnings
warnings.filterwarnings('ignore')
from base_helper import *

test_op_rd1 = get_operation_round1_new()     # round-1 test operations
train_op_tr = get_operation_train_new()      # train operations
train_tst = get_transaction_train_new()      # train transactions
# BUG FIX: this assignment previously reused the name `test_op_rd1`, silently
# discarding the round-1 operation table loaded above.
test_tst_rd1 = get_transaction_round1_new()  # round-1 test transactions

tag = get_tag_train_new()                    # UID -> fraud label table

# Left-join the label onto each train table (rows without a tag keep NaN).
train_op_trs = pd.merge(train_op_tr, tag, on='UID', how='left')
train_tsts = pd.merge(train_tst, tag, on='UID', how='left')

# Feature extraction


# # day-related features

# Per-user operation features: total operation count and distinct active days.
train_op_feature = pd.DataFrame({'UID': train_op_trs.UID.unique()})

'''Total number of operations per user'''
# Named aggregation replaces the dict-renaming agg form, which was
# deprecated in pandas 0.25 and removed in 1.0.
train_op_feature = pd.merge(
    train_op_feature,
    train_op_trs.groupby('UID', as_index=False)['day'].agg(op_count='count'),
    on='UID', how='left')

'''Number of distinct days the user was active'''
train_day = train_op_trs.copy()
train_day.drop_duplicates(['UID', 'day'], inplace=True)
train_day = train_day.groupby('UID', as_index=False)['day'].agg(day_count_op='count')
train_op_feature = pd.merge(train_op_feature, train_day, on='UID', how='left')

# # mode字段的相关分析与提取

'''Rank-encode `mode`: the 15 most frequent values get codes 1..15, the rest -1,
then count per-user active days for each encoded mode.'''
train_mode = train_op_trs.drop_duplicates(subset=['UID', 'day', 'mode'])

freq_order = train_mode['mode'].value_counts().index.tolist()
codes = [i + 1 for i in range(15)] + [-1] * (len(freq_order) - 15)
train_mode['mode'] = train_mode['mode'].map(dict(zip(freq_order, codes)))

# Pivot to one column per encoded mode holding the per-user day count.
# (The original also round-tripped `temp.columns` through a list, which
# was a no-op and has been removed.)
temp = train_mode.groupby(['UID', 'mode'])['day'].count().unstack('mode').reset_index()
temp.columns = [temp.columns[0]] + ['mode_' + str(i) for i in temp.columns[1:]]
temp.fillna(0, inplace=True)
train_op_feature = pd.merge(train_op_feature, temp, on='UID', how='left')


# # Operation success / failure rates
'''Per-user share of successful, failed and missing `success` values.'''
train_success = train_op_trs.copy()
# Missing `success` is encoded as its own category (2) before dummifying.
train_success['success'].fillna(2, inplace=True)
train_success.success = train_success.success.astype(int)
success_dummies = pd.get_dummies(train_success.success).add_prefix('success_')
train_success = pd.concat([train_success.UID, success_dummies], axis=1)
# sum/count over a 0/1 indicator column is the per-user rate.
train_success = train_success.groupby('UID', as_index=False).agg(
    lambda col: col.sum() / col.count())
train_op_feature = pd.merge(train_op_feature, train_success, on='UID', how='left')


# # Time-of-day handling

'''Count operations per time-of-day segment for each user.'''
train_time = train_op_trs.copy()
# `time` looks like "HH:MM:SS"; the first two characters are the hour.
train_time.time = train_time.time.str[:2].astype('int')

def time_help(data):
    """Map an hour of day to a coarse segment.

    Returns 0 for morning (8-12], 1 for afternoon (12-19], 2 for
    evening (19-24], 3 for night/early morning [0-7]; None otherwise.
    """
    if 7 < data <= 12:
        return 0
    if 12 < data <= 19:
        return 1
    if 19 < data <= 24:
        return 2
    if 0 <= data <= 7:
        return 3

# One indicator column per segment, summed to per-user segment counts.
train_time.time = train_time.time.map(time_help)
segment_dummies = pd.get_dummies(train_time.time, prefix='time_op')
train_time = pd.concat([train_time.UID, segment_dummies], axis=1).groupby('UID', as_index=False).sum()
train_op_feature = pd.merge(train_op_feature, train_time, on='UID', how='left')


# # Operating-system (os) analysis
# 101 unknown operating system
# 102 Android phone
# 103 Apple (iOS) phone
# 104 computer
# 105 computer + mode="8e463287d7146285" strong signal, almost certainly a fraudulent user
# 107 mostly computer + mode="d25caee90b27fa9b" strong signal, almost certainly a fraudulent user
#

'''Number of distinct operating systems per user, and distinct (day, os) pairs.'''
train_os = train_op_trs.copy()
train_os.drop_duplicates(subset=['UID', 'os'], inplace=True)
# After deduplication, count() = number of distinct OSes per user.
train_os = train_os.groupby('UID', as_index=False).os.count()
train_op_feature = pd.merge(train_op_feature, train_os, on='UID', how='left')

'''Distinct (day, os) combinations per user'''
train_os_day = train_op_trs.copy()
train_os_day.drop_duplicates(subset=['UID', 'day', 'os'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_os_day = train_os_day.groupby('UID', as_index=False).os.agg(os_day='count')
train_op_feature = pd.merge(train_op_feature, train_os_day, on='UID', how='left')


# # Is the user on an Apple phone?

'''Infer apple vs. other from which device-code column is populated.'''
train_apple = train_op_trs.copy()
# Collapse each device-code column to a presence flag (0 = missing, 1 = present).
for col in ('device_code3', 'device_code1'):
    train_apple[col] = train_apple[col].fillna(0).apply(lambda v: v if v == 0 else 1)

train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()
# presumably device_code3 marks Apple hardware — verify against the raw data
train_apple['is_apple'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0, 1)
train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)
train_op_feature = pd.merge(train_op_feature, train_apple, on='UID', how='left')


# # Number of distinct devices per user

'''Distinct devices per user and distinct (day, device) pairs.'''
train_device = train_op_trs.copy()

# device2 missing together with ip2_sub -> "no device info" bucket (0);
# device2 missing alone -> a single "unknown device" bucket (1).
train_device.device2 = np.where(train_device.device2.isnull() & train_device.ip2_sub.isnull(),
                                0, train_device.device2)
train_device.device2 = train_device.device2.fillna(1)

# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_device1 = train_device.groupby(['UID', 'device2'], as_index=False).count()
train_device1 = train_device1.groupby(['UID'], as_index=False)['device2'].agg(device_count='count')
train_op_feature = pd.merge(train_op_feature, train_device1, on='UID', how='left')

train_device_day = train_device.groupby(['UID', 'day', 'device2'], as_index=False).count()
train_device_day = train_device_day.groupby(['UID'], as_index=False)['device2'].agg(device_day='count')
train_op_feature = pd.merge(train_op_feature, train_device_day, on='UID', how='left')


# # mac1 analysis

'''Number of distinct mac addresses per user.'''
# (A leftover notebook inspection expression `train_op_trs[train_op_trs.UID == 10000]`
# was removed here — it had no effect as a script statement.)
train_mac = train_op_trs.copy()
train_mac.mac1 = train_mac.mac1.fillna(0)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_mac = train_mac.groupby(['UID', 'mac1'], as_index=False)['day'].agg(mac_count='count')
train_mac['ratio'] = np.where(train_mac.mac1 == 0, 0, 1)
train_mac_count = train_mac.groupby('UID', as_index=False)['mac1'].count()
train_op_feature = pd.merge(train_op_feature, train_mac_count, on='UID', how='left')

'''mac1 "missing rate"'''
# NOTE(review): count() counts non-null values, so mac_loss is actually the
# *presence* rate of mac1, not the missing rate (the ip version near the end
# of the file subtracts from 1). Kept as-is for reproducibility.
train_mac_loss = train_op_trs.groupby('UID', as_index=False).count()
train_mac_loss['mac_loss'] = train_mac_loss.mac1 / train_mac_loss.day
train_mac_loss = train_mac_loss[['UID', 'mac_loss']]
train_op_feature = pd.merge(train_op_feature, train_mac_loss, on='UID', how='left')


# # ip statistics and phone-vs-computer flag

'''Number of distinct ips and whether the user shows both ip1 and ip2.'''
train_ip = train_op_trs.copy()
train_ip.ip1 = train_ip.ip1.fillna('0').astype(str)
train_ip.ip2 = train_ip.ip2.fillna('0').astype(str)
# 0 when both ip1 and ip2 are present in the same row, else 1.
train_ip['is_phone_computer'] = np.where((train_ip.ip1 != '0') & (train_ip.ip2 != '0'), 0, 1)
train_phone_computer = train_ip.groupby(['UID', 'is_phone_computer'], as_index=False).count()
train_phone_computer = train_phone_computer.groupby('UID', as_index=False)['is_phone_computer'].count()
train_op_feature = pd.merge(train_op_feature, train_phone_computer, on='UID', how='left')
# Concatenated ip1+ip2 string identifies the (ip1, ip2) pair.
# (The original pre-initialized train_ip['ip'] = 0 — a dead store, removed.)
train_ip['ip'] = train_ip['ip1'] + train_ip['ip2']
train_ip = train_ip.groupby(['UID', 'ip'], as_index=False).count()
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_ip = train_ip.groupby('UID', as_index=False)['ip'].agg(ip_number='count')
train_op_feature = pd.merge(train_op_feature, train_ip, on='UID', how='left')


# # wifi analysis

'''Distinct wifi values per user and distinct (day, wifi) pairs.'''
train_wifi = train_op_trs.copy()
train_wifi.wifi = train_wifi.wifi.fillna(0)

train_wifi1 = train_wifi.drop_duplicates(subset=['UID', 'wifi'])
train_wifi1 = train_wifi1.groupby('UID', as_index=False).wifi.count()
train_op_feature = pd.merge(train_op_feature, train_wifi1, on='UID', how='left')

'''Distinct (day, wifi) pairs per user'''
# (The original comment here wrongly said "operating systems" — copy/paste.)
train_wifi_day = train_wifi.drop_duplicates(subset=['UID', 'day', 'wifi'])
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_wifi_day = train_wifi_day.groupby('UID', as_index=False).wifi.agg(wifi_day='count')
train_op_feature = pd.merge(train_op_feature, train_wifi_day, on='UID', how='left')

'''Share of rows whose wifi is missing (flag==0 means missing).'''
train_is_wefi = train_op_trs.copy()
train_is_wefi.wifi = train_is_wefi.wifi.fillna(0)
train_is_wefi['wefi_loss'] = np.where(train_is_wefi.wifi == 0, 0, 1)

# Fraction of zero flags per user = missing rate.
funcs = lambda x: x[x == 0].count() / x.count()
train_is_wefi = train_is_wefi.groupby('UID', as_index=False).agg({'wefi_loss': funcs})
train_op_feature = pd.merge(train_op_feature, train_is_wefi, on='UID', how='left')


# # geo_code analysis: per-day location variety and geo missing rate

'''Average number of distinct geo codes per active day.'''
train_geo = (train_op_trs
             .assign(geo_code=lambda d: d.geo_code.fillna(0))
             .drop_duplicates(subset=['UID', 'day', 'geo_code'], keep='first')
             .groupby(['UID', 'day'], as_index=False)['geo_code'].count()
             .groupby('UID', as_index=False)
             .agg({'day': 'count', 'geo_code': 'sum'}))
train_geo['avg_day_geo'] = train_geo['geo_code'] / train_geo['day']
train_geo.drop(['day', 'geo_code'], axis=1, inplace=True)
train_op_feature = pd.merge(train_op_feature, train_geo, on='UID', how='left')

'''Share of rows whose geo_code is missing (flag==0 means missing).'''
train_is_geo = train_op_trs.copy()
train_is_geo.geo_code = train_is_geo.geo_code.fillna(0)
train_is_geo['geo_loss'] = np.where(train_is_geo.geo_code == 0, 0, 1)

zero_share = lambda flags: flags[flags == 0].count() / flags.count()
train_is_geo = train_is_geo.groupby('UID', as_index=False).agg({'geo_loss': zero_share})
train_op_feature = pd.merge(train_op_feature, train_is_geo, on='UID', how='left')


# # Day-averaged features
'''Normalize the count features by total operations and by active days.'''
avg_clos = ['mode_-1', 'mode_1', 'mode_2',
            'mode_3', 'mode_4', 'mode_5', 'mode_6', 'mode_7', 'mode_8', 'mode_9',
            'mode_10', 'mode_11', 'mode_12', 'mode_13', 'mode_14', 'mode_15',
            'success_0', 'success_1', 'success_2', 'time_op_0', 'time_op_1',
            'time_op_2', 'time_op_3', 'os', 'os_day', 'device_count',
            'device_day', 'mac1', 'ip_number',
            'wifi', 'wifi_day']

for i in avg_clos:
    # BUG FIX: the source frame here was `test_feature`, an undefined name
    # (NameError at runtime); these averages must come from train_op_feature.
    train_op_feature['avg_op_{}'.format(i)] = train_op_feature[i] / train_op_feature['op_count']
    train_op_feature['avg_day_{}'.format(i)] = train_op_feature[i] / train_op_feature['day_count_op']

train_op_feature.to_csv('train_op_feature.csv', index=False, encoding='utf-8')

# ---------- transaction-table features ----------
import warnings
warnings.filterwarnings('ignore')
from base_helper import *

train_tst = get_transaction_train_new()

tag = get_tag_train_new()

train_tsts = pd.merge(train_tst, tag, on='UID', how='left')

train_tst_feature = pd.DataFrame({'UID': train_tsts.UID.unique()})

# # Transaction day counts and per-day averages
# (A leftover notebook inspection expression `train_tsts[train_tsts.UID == 10000]`
# was removed here — it had no effect as a script statement.)
train_tst_day = train_tsts.copy()

'''Total transactions per user and average transactions per active day.'''
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_tst_feature = pd.merge(
    train_tst_feature,
    train_tst_day.groupby('UID', as_index=False)['day'].agg(trans_count='count'),
    on='UID')
train_tst_day.drop_duplicates(['UID', 'day'], keep='first', inplace=True)
train_tst_day = train_tst_day.groupby('UID', as_index=False)['day'].agg(day_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_tst_day, on='UID', how='left')

train_tst_feature['avg_day_trans'] = train_tst_feature.trans_count / train_tst_feature.day_count

'''Per-segment transaction counts and per-segment averages.'''
train_time = train_tsts.copy()
# Hour = first two characters of the "HH:MM:SS" time string.
train_time.time = train_time.time.str[:2].astype('int')

def time_help(data):
    """Map an hour of day to a segment: 0 morning, 1 afternoon, 2 evening, 3 night."""
    for code, (lo, hi) in enumerate([(7, 12), (12, 19), (19, 24)]):
        if lo < data <= hi:
            return code
    if 0 <= data <= 7:
        return 3

train_time.time = train_time.time.map(time_help)
train_time = pd.concat([train_time.UID, pd.get_dummies(train_time.time, prefix='time')], axis=1)
train_time = train_time.groupby('UID', as_index=False).sum()
train_tst_feature = pd.merge(train_tst_feature, train_time, on='UID', how='left')

# Users with zero transactions in a segment produce inf (x/0); map those to 0.
# Hoisted the four copy-pasted statements into a loop; a stray `.head()`
# inspection line at the end of this section was removed (no-op in a script).
# assumes every segment 0-3 occurs somewhere in the data so all four
# time_X dummy columns exist — TODO confirm
time_func = lambda x: 0 if x == np.inf else x
for seg in range(4):
    train_tst_feature['avg_time{}_trans'.format(seg)] = (
        train_tst_feature.trans_count / train_tst_feature['time_{}'.format(seg)]).map(time_func)


# # Mean / max / min of the gap (in days) between consecutive transactions per user

train_time_sub = train_tsts.copy()
# Sort within each user so shift(-1) yields the *next* transaction's day.
train_time_sub.sort_values(by=['UID','day','time'],inplace=True)

train_time_sub['day_shift'] = train_time_sub.groupby('UID')['day'].shift(-1)

train_time_sub = train_time_sub[['UID', 'day', 'time', 'day_shift']]
# Gap to the next transaction; each user's last row gets NaN, filled with 0.
train_time_sub['sub'] = train_time_sub['day_shift'] - train_time_sub['day']
train_time_sub['sub'] = train_time_sub['sub'].fillna(0)

# NOTE(review): {'mean','max','min'} is a set, so the resulting column order is
# unspecified (names day_shift_mean/... are stable, order is not). Also, this
# relies on older pandas ignoring as_index=False for multi-function agg; on
# newer versions add_prefix could rename UID and break the merge — verify.
train_time_sub = train_time_sub.groupby('UID', as_index=False)['sub'].agg({'mean','max','min'}).add_prefix('day_shift_')
train_tst_feature = pd.merge(train_tst_feature, train_time_sub, on='UID', how='left')


# # Channel counts and per-channel transaction-amount stats
# (Two leftover notebook inspection expressions on train_tsts were removed
# here — they had no effect as script statements.)

'''Number of distinct channels and average distinct channels per day.'''
train_channel_count = train_tsts.copy()
train_channel_count.drop_duplicates(['UID', 'channel'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_channel_count = train_channel_count.groupby('UID', as_index=False)['channel'].agg(channel_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_channel_count, on='UID', how='left')
train_tst_feature['avg_day_diff_channel'] = train_tst_feature['channel_count'] / train_tst_feature['day_count']

'''Average number of channels used per day (distinct day/channel pairs).'''
train_avg_channel = train_tsts.copy()
train_avg_channel.drop_duplicates(['UID', 'day', 'channel'], inplace=True)
train_avg_channel = train_avg_channel.groupby('UID', as_index=False)['channel'].agg(channel_day_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_avg_channel, on='UID', how='left')
train_tst_feature['avg_day_channel'] = train_tst_feature['channel_day_count'] / train_tst_feature['day_count']

'''Per-channel transaction-amount stats (mean/max/min), one column per channel.'''
train_channel_amt = train_tsts.copy()
train_channel_amt = train_channel_amt.groupby(['UID','channel'])['trans_amt'].agg({'mean','max','min'}).add_prefix('amt_channel_').unstack('channel')
train_channel_amt.columns = [x[0]+"_"+str(x[1]) for x in train_channel_amt.columns.ravel()]
train_channel_amt.fillna(0, inplace=True)
train_channel_amt.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_channel_amt, on='UID', how='left')


# # Marketing-campaign features: campaigns per day, distinct campaigns, amounts per type

'''Distinct (day, market_code) pairs per user -> campaigns per day.'''
train_market_code = train_tsts.copy()
train_market_code['market_code'].fillna(0, inplace=True)
train_market_code.drop_duplicates(['UID', 'day', 'market_code'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_market_code = train_market_code.groupby('UID', as_index=False)['market_code'].agg(mark_code_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_market_code, on='UID', how='left')
train_tst_feature['avg_day_mark_code'] = train_tst_feature['mark_code_count'] / train_tst_feature['day_count']

'''Distinct campaigns per user -> distinct campaigns per day.'''
train_market_code_diff = train_tsts.copy()
train_market_code_diff['market_code'].fillna(0, inplace=True)
train_market_code_diff.drop_duplicates(['UID', 'market_code'], inplace=True)
train_market_code_diff = train_market_code_diff.groupby('UID', as_index=False)['market_code'].agg(mark_code_count_diff='count')
train_tst_feature = pd.merge(train_tst_feature, train_market_code_diff, on='UID', how='left')
train_tst_feature['avg_day_diff_mark_code'] = train_tst_feature['mark_code_count_diff'] / train_tst_feature['day_count']

'''Amount statistics per marketing type (mean/max/min), one column per type.'''
train_market_type = train_tsts.copy()
train_market_type['market_type'].fillna(0, inplace=True)
train_market_type = train_market_type.groupby(['UID','market_type'])['trans_amt'].agg({'mean','max','min'}).add_prefix('market_type_').unstack()
train_market_type.columns = [x[0]+"_"+str(x[1]) for x in train_market_type.columns.ravel()]
train_market_type.fillna(0, inplace=True)
train_market_type.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_market_type, on='UID', how='left')


# # Transaction-type features and their relation to amount

# Diagnostic output kept from the original (lists the distinct trans_type1 values).
print(train_tsts['trans_type1'].unique(), len(train_tsts['trans_type1'].unique()))

'''Distinct (day, trans_type1) pairs per user -> types per day.'''
train_type_1 = train_tsts.copy()
train_type_1.drop_duplicates(['UID', 'day', 'trans_type1'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_type_1 = train_type_1.groupby('UID', as_index=False)['trans_type1'].agg(trans_type1_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_type_1, on='UID', how='left')
train_tst_feature['avg_day_trans_type1'] = train_tst_feature['trans_type1_count'] / train_tst_feature['day_count']

'''Same for trans_type2.'''
train_type_2 = train_tsts.copy()
train_type_2.drop_duplicates(['UID', 'day', 'trans_type2'], inplace=True)
train_type_2 = train_type_2.groupby('UID', as_index=False)['trans_type2'].agg(trans_type2_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_type_2, on='UID', how='left')
train_tst_feature['avg_day_trans_type2'] = train_tst_feature['trans_type2_count'] / train_tst_feature['day_count']

'''Amount statistics per trans_type2 value, one column per type.'''
train_type2_amt = train_tsts.copy()
train_type2_amt['trans_type2'].fillna(0, inplace=True)
train_type2_amt = train_type2_amt.groupby(['UID','trans_type2'])['trans_amt'].agg({'mean','max','min'}).add_prefix('trans_type2_').unstack()
train_type2_amt.columns = [x[0]+"_"+str(int(x[1])) for x in train_type2_amt.columns.ravel()]
train_type2_amt.fillna(0, inplace=True)
train_type2_amt.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_type2_amt, on='UID', how='left')


# # trans_amt / bal summary statistics

'''Per-user statistics of transaction amount and balance.'''
train_amt = train_tsts.copy()
train_amt = train_amt.groupby('UID')[['trans_amt', 'bal']].agg({'mean', 'min', 'max', 'sum'})
# Flatten the (column, stat) MultiIndex into names like trans_amt_mean.
train_amt.columns = ['_'.join(pair) for pair in train_amt.columns.ravel()]
train_amt.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_amt, on='UID', how='left')
train_tst_feature['avg_trans_amt'] = train_tst_feature['trans_amt_sum'] / train_tst_feature['day_count']
train_tst_feature['avg_bal_amt'] = train_tst_feature['bal_sum'] / train_tst_feature['day_count']


# # Funding-source (amt_src1) features

'''Non-null amt_src1 rows per user and distinct (day, amt_src1) pairs.'''
train_amt_src = train_tsts.copy()
train_tst_feature = pd.merge(train_tst_feature,
                             train_amt_src.groupby('UID', as_index=False)['amt_src1'].count())
train_amt_src.drop_duplicates(['UID', 'day', 'amt_src1'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_amt_src = train_amt_src.groupby('UID', as_index=False)['amt_src1'].agg(amt_src_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_amt_src, on='UID', how='left')
train_tst_feature['avg_day_amt_src'] = train_tst_feature['amt_src_count'] / train_tst_feature['day_count']


# # Merchant features: merchant, code1, code2

'''Merchant row count per user and average distinct merchants per day.'''
train_merchant = train_tsts.copy()
train_tst_feature = pd.merge(train_tst_feature,
                             train_merchant.groupby('UID', as_index=False)['merchant'].count())
train_merchant.drop_duplicates(['UID', 'day', 'merchant'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_merchant = train_merchant.groupby('UID', as_index=False)['merchant'].agg(merchant_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_merchant, on='UID', how='left')
train_tst_feature['avg_day_merchant'] = train_tst_feature['merchant_count'] / train_tst_feature['day_count']

# Per-merchant amount sum and mean, then the implied transaction count
# (sum/mean) per merchant, aggregated back to user level.
# NOTE(review): built from the unlabeled `train_tst` (not `train_tsts`) —
# harmless since only UID/merchant/trans_amt are used, but inconsistent.
# NOTE(review): `.agg({'sum','mean'})` with as_index=False relies on older
# pandas ignoring as_index for multi-function aggregation; on newer versions
# add_prefix would also rename UID and the later groupby('UID') would fail.
temp1 = train_tst.groupby(['UID', 'merchant'], as_index=False)['trans_amt'].agg({'sum', 'mean'}).add_prefix('merchant_').reset_index()
temp1['trans_mer_number'] = temp1['merchant_sum'] / temp1['merchant_mean']
# NOTE(review): this aggregates every remaining column (including the string
# `merchant` column) with mean/max/min/sum — older pandas silently drops
# columns where a function fails; newer versions raise. Verify pandas version.
temp1 = temp1.groupby('UID').agg({'mean','max', 'min','sum'})
temp1.columns = [x[0]+'_'+x[1] for x in temp1.columns.ravel()]
temp1.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, temp1, on='UID', how='left')

train_tst_feature.columns  # leftover notebook inspection; no effect as a script


# # Geo features from the transaction table

'''Average number of distinct geo codes per active day.'''
train_geo = train_tsts.copy()
train_geo.geo_code = train_geo.geo_code.fillna(0)
train_geo = train_geo.drop_duplicates(subset=['UID', 'day', 'geo_code'], keep='first')
train_geo = train_geo.groupby(['UID', 'day'], as_index=False)['geo_code'].count()
train_geo = train_geo.groupby('UID', as_index=False).agg({'day': 'count', 'geo_code': 'sum'})
train_geo['avg_day_geo_tst'] = train_geo['geo_code'] / train_geo['day']
# NOTE(review): unlike the operation-table version, geo_code (total distinct
# day-level codes) is kept here as an extra merged feature — presumably
# intentional; verify before changing.
train_geo.drop(['day'], axis=1, inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_geo, on='UID', how='left')

'''Average number of transactions per distinct geo location.'''
train_geo_trans = train_tsts.copy()
train_geo_trans['geo_code'] = train_geo_trans['geo_code'].fillna(0)
train_geo_trans.drop_duplicates(['UID', 'geo_code'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_geo_trans = train_geo_trans.groupby('UID', as_index=False)['geo_code'].agg(avg_trans_geo_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_geo_trans, on='UID', how='left')
train_tst_feature['avg_trans_geo'] = train_tst_feature['trans_count'] / train_tst_feature['avg_trans_geo_count']


# # Account (acc_id1) features

'''Number of distinct accounts and transactions per account.'''
train_acc_id = train_tsts.copy()
train_acc_id['acc_id1'] = train_acc_id['acc_id1'].fillna(0)
train_acc_id.drop_duplicates(['UID', 'acc_id1'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_acc_id = train_acc_id.groupby('UID', as_index=False)['acc_id1'].agg(acc_id1_count='count')
train_tst_feature = pd.merge(train_tst_feature, train_acc_id, on='UID', how='left')
train_tst_feature['avg_trans_acc_id1'] = train_tst_feature['trans_count'] / train_tst_feature['acc_id1_count']


# # Device features from the transaction table

'''Devices per user, transactions per device, devices per day.'''
train_device = train_tsts.copy()
train_device.device2 = train_device.device2.fillna(0)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_tst_feature = pd.merge(train_tst_feature,
                             train_device.groupby('UID', as_index=False)['device2'].agg(device_tst='count'))
train_device.drop_duplicates(['UID', 'day', 'device2'], inplace=True)
train_device = train_device.groupby(['UID'], as_index=False)['device2'].agg(device_count_tst='count')
train_tst_feature = pd.merge(train_tst_feature, train_device, on='UID', how='left')
train_tst_feature['avg_trans_device'] = train_tst_feature['trans_count'] / train_tst_feature['device_count_tst']
# NOTE(review): 'decive' is a typo in the original feature name; kept so the
# output CSV schema stays backward-compatible.
train_tst_feature['avg_day_decive_tst'] = train_tst_feature['device_tst'] / train_tst_feature['day_count']


# # mac1 analysis (transaction table)

'''Number of distinct mac addresses per user.'''
train_mac = train_tsts.copy()
train_mac.mac1 = train_mac.mac1.fillna(0)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_mac = train_mac.groupby(['UID', 'mac1'], as_index=False)['day'].agg(mac_count_tst='count')
train_mac['ratio'] = np.where(train_mac.mac1 == 0, 0, 1)
train_mac_count = train_mac.groupby('UID', as_index=False)['mac1'].count()
train_tst_feature = pd.merge(train_tst_feature, train_mac_count, on='UID', how='left')

'''mac1 "missing rate"'''
# NOTE(review): count() counts non-null values, so mac_loss_tst is actually
# the *presence* rate of mac1, not the missing rate; kept for reproducibility.
train_mac_loss = train_tsts.groupby('UID', as_index=False).count()
train_mac_loss['mac_loss_tst'] = train_mac_loss.mac1 / train_mac_loss.day
train_mac_loss = train_mac_loss[['UID', 'mac_loss_tst']]
train_tst_feature = pd.merge(train_tst_feature, train_mac_loss, on='UID', how='left')


# # Is the transaction device an Apple phone?
'''Flag apple devices, plus users seen with both device-code types.'''
train_apple = train_tsts.copy()
# Collapse each device-code column to a presence flag (0 = missing, 1 = present).
for col in ('device_code3', 'device_code1'):
    train_apple[col] = train_apple[col].fillna(0).apply(lambda v: v if v == 0 else 1)

train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()
# presumably device_code3 marks Apple hardware — verify against the raw data
train_apple['is_apple_tst'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0, 1)
train_apple['is_apple_an_tst'] = np.where((train_apple.device_code1 != 0) & (train_apple.device_code3 != 0), 1, 0)
train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_apple, on='UID', how='left')


# # ip1 / ip1_sub statistics
'''Distinct (day, ip1) pairs per user -> average ips per day.'''
train_ip = train_tsts.copy()
train_ip['ip1'] = train_ip['ip1'].fillna(0)
train_ip.drop_duplicates(['UID', 'day', 'ip1'], inplace=True)
# Named aggregation replaces the dict-renaming form removed in pandas 1.0.
train_ip = train_ip.groupby('UID', as_index=False)['ip1'].agg(ip1_count_tst='count')
train_tst_feature = pd.merge(train_tst_feature, train_ip, on='UID', how='left')
train_tst_feature['avg_day_ip_tst'] = train_tst_feature['ip1_count_tst'] / train_tst_feature['day_count']


'''Transactions per ip change.'''
# NOTE(review): despite the name, ip1_count_unique counts all rows (ip1 is
# filled, never dropped to distinct values) — a drop_duplicates on
# ['UID','ip1'] was probably intended; kept as-is for reproducibility.
train_ip_trans = train_tsts.copy()
train_ip_trans['ip1'] = train_ip_trans['ip1'].fillna(0)
train_ip_trans = train_ip_trans.groupby('UID', as_index=False)['ip1'].agg(ip1_count_unique='count')
train_tst_feature = pd.merge(train_tst_feature, train_ip_trans, on='UID', how='left')
train_tst_feature['avg_trans_ip1'] = train_tst_feature['trans_count'] / train_tst_feature['ip1_count_unique']


'''Missing rate of the transaction ip.'''
# count() counts non-null values, so 1 - ip1/day is the missing share.
ip_presence = train_tsts.groupby('UID', as_index=False).count()
ip_presence['ip_loss_tst'] = 1 - ip_presence.ip1 / ip_presence.day
train_ip_loss = ip_presence[['UID', 'ip_loss_tst']]
train_tst_feature = pd.merge(train_tst_feature, train_ip_loss, on='UID', how='left')


# # Persist the transaction-table features
train_tst_feature.to_csv('./train_tst_feature.csv', index=False, encoding='utf-8')












