# coding: utf-8

import warnings

warnings.filterwarnings('ignore')
from tiancheng.base.base_helper import *

# Load the operation tables (train + round-1 test) via the project helper module.
test_op_rd = get_operation_round1_new()
train_op_tr = get_operation_train_new()

# Attach labels: test rows get a sentinel Tag of -1, train rows get their real Tag.
tag = get_tag_train_new()
# NOTE(review): `tag_hd` is not defined in this file — presumably exported by
# base_helper via the star import, with `tag_hd.Tag` naming the label column. Confirm.
test_op_rd[tag_hd.Tag] = -1
train_op_tr = pd.merge(train_op_tr, tag, on='UID', how='left')

'''合并训练测试集'''
# Stack train and test operation records into a single frame.
train_op_trs = pd.concat([train_op_tr, test_op_rd], axis=0).reset_index(drop=True)

# # 操作表中异常数据的清除， 主要操作系统的处理,以及ip2地址的删除

# Keep only rows with a known OS code, force a fixed device_code3 for os==101,
# and drop the ip2 columns.
train_op_trs = train_op_trs[train_op_trs.os.isin([101, 102, 103, 200])].reset_index()
train_op_trs['device_code3'] = np.where(train_op_trs['os'] == 101, '98d2654c0f52dca0', train_op_trs['device_code3'])
train_op_trs.drop(['ip2', 'ip2_sub'], axis=1, inplace=True)

# # day相关的feature

# Feature frame skeleton: one row per UID in the merged operation table, with Tag carried over.
train_op_feature = pd.DataFrame({'UID': [i for i in train_op_trs.UID.unique()]})
print(train_op_feature.shape)
train_op_feature = train_op_feature.merge(train_op_trs.drop_duplicates(['UID', 'Tag'])[['UID', 'Tag']],
                                          on='UID', how='left')
print(train_op_feature.shape)

'''总共的操作次数'''
# op_count: total number of operation records per user.
train_op_feature = pd.merge(train_op_feature,
                            train_op_trs.groupby('UID', as_index=False)['day'].agg({'op_count': 'count'}), on='UID',
                            how='left')

'''一共参与的天数'''
# day_count_op: number of distinct active days per user.
train_day = train_op_trs.copy()
train_day.drop_duplicates(['UID', 'day'], inplace=True)
train_day = train_day.groupby('UID', as_index=False)['day'].agg({'day_count_op': 'count'})
train_op_feature = pd.merge(train_op_feature, train_day, on='UID', how='left')

# # mode字段的相关分析与提取

'''平均每天不同的code的次数'''
# Recode `mode`: the 15 most frequent values map to 1..15, everything else to -1;
# then count distinct active days per (UID, mode) and pivot wide into mode_* columns.
train_mode = train_op_trs.drop_duplicates(subset=['UID', 'day', 'mode'])

a = train_mode['mode'].value_counts().index.tolist()
# NOTE(review): assumes at least 15 distinct mode values exist; fewer would make `b` shorter than `a`.
b = [i + 1 for i in range(15)] + ([-1] * (len(a) - 15))
dicts = dict(zip(a, b))
train_mode['mode'] = train_mode['mode'].map(dicts)

temp = train_mode.groupby(['UID', 'mode'])['day'].count()
temp = temp.unstack('mode').reset_index()
cols = temp.columns.tolist()
temp.columns = cols  # NOTE(review): no-op round-trip of the column labels
temp.columns = [temp.columns[0]] + ['mode' + '_' + str(i) for i in temp.columns[1:]]
temp.fillna(0, inplace=True)
train_op_feature = pd.merge(train_op_feature, temp, on='UID', how='left')

# # 操作成功率与失败率

'''操作的成功率以及失败率以及缺失率'''
# Per-user operation outcome rates: success is 0/1, with NaN recoded as its own
# category 2; one-hot the category and take each column's per-UID mean, giving
# the user's failure / success / missing rates.
train_success = train_op_trs.copy()
train_success['success'].fillna(2, inplace=True)
train_success.success = train_success.success.astype(int)
# BUG FIX: the original pd.concat used the default axis=0, stacking the UID
# series *below* the dummy frame (cross-filled with NaN), so every rate came
# out 0 after the groupby.  The intended alignment is column-wise (axis=1).
train_success = pd.concat([train_success.UID, pd.get_dummies(train_success.success).add_prefix('success_')], axis=1)
success_func = lambda x: x.sum() / x.count()  # share of rows in each outcome category
train_success.columns = ['UID', 'success_0', 'success_1', 'success_2']
train_success = train_success.groupby('UID', as_index=False).agg(success_func)
train_op_feature = pd.merge(train_op_feature, train_success, on='UID', how='left')

# # time处理方法

'''统计每个不同时间段的交易次数，以及每个用户每个时间段的平均交易次数'''
# Reduce HH:MM:SS strings to the integer hour of day.
train_time = train_op_trs.copy()
train_time.time = train_time.time.str[:2]

train_time.time = train_time.time.astype('int')


def time_help(data):
    """Bucket an hour-of-day integer into four coarse periods.

    (7, 12] -> 0 (morning), (12, 19] -> 1 (afternoon), (19, 24] -> 2 (evening),
    [0, 7] -> 3 (night).  Values outside [0, 24] fall through and yield None,
    exactly as the original implementation did.
    """
    if 7 < data <= 12:
        return 0
    elif 12 < data <= 19:
        return 1
    elif 19 < data <= 24:
        return 2
    elif 0 <= data <= 7:
        return 3


# Map raw hours into 4 coarse buckets, one-hot them and count occurrences per user.
train_time.time = train_time.time.map(time_help)
# BUG FIX: the original concat defaulted to axis=0 (row-wise stacking), which
# never aligns UID with the dummy columns; the per-user counts then collapse
# to zero after the groupby.  Column-wise concat (axis=1) is what is intended.
train_time = pd.concat([train_time.UID, pd.get_dummies(train_time.time, prefix='time_op')], axis=1)
train_time = train_time.groupby('UID', as_index=False).sum()
train_op_feature = pd.merge(train_op_feature, train_time, on='UID', how='left')

'''统计用户一共有多少个操作系统以及每多少天换一个操作系统'''
# os: number of distinct operating systems per user.
train_os = train_op_trs.copy()
train_os.drop_duplicates(subset=['UID', 'os'], inplace=True)
train_os = train_os.groupby('UID', as_index=False).os.count()
train_op_feature = pd.merge(train_op_feature, train_os, on='UID', how='left')
'''平均一天的操作系统的个数'''
# os_day: number of distinct (day, os) pairs per user.
train_os_day = train_op_trs.copy()
train_os_day.drop_duplicates(subset=['UID', 'day', 'os'], inplace=True)
train_os_day = train_os_day.groupby('UID', as_index=False).os.agg({'os_day': 'count'})
train_op_feature = pd.merge(train_op_feature, train_os_day, on='UID', how='left')

# # 是否为苹果手机


'''是否wie苹果手机的判断'''
# is_apple: compares per-user presence counts of device_code1 vs device_code3.
# NOTE(review): presumably device_code3 marks Apple devices (the os==101 cleanup
# above also writes device_code3) — confirm against the data dictionary.
train_apple = train_op_trs.copy()
train_apple.device_code3 = train_apple.device_code3.fillna(0)
train_apple.device_code1 = train_apple.device_code1.fillna(0)

# Binarize: 0 = missing, 1 = present.
train_apple.device_code3 = train_apple.device_code3.apply(lambda x: x if x == 0 else 1)
train_apple.device_code1 = train_apple.device_code1.apply(lambda x: x if x == 0 else 1)

train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()
train_apple['is_apple'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0, 1)
train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)
train_op_feature = pd.merge(train_op_feature, train_apple, on='UID', how='left')

# # 同一个用户不同的设备个数


'''y用户设备的个数以及每天不同设备的个数'''
# device_count: distinct device2 values per user (NaN coded as 1);
# device_day: distinct (day, device2) pairs per user.
train_device = train_op_trs.copy()

train_device.device2 = train_device.device2.fillna(1)

train_device1 = train_device.groupby(['UID', 'device2'], as_index=False).count()
train_device1 = train_device1.groupby(['UID'], as_index=False)['device2'].agg({'device_count': 'count'})
train_op_feature = pd.merge(train_op_feature, train_device1, on='UID', how='left')

train_device_day = train_device.groupby(['UID', 'day', 'device2'], as_index=False).count()
train_device_day = train_device_day.groupby(['UID'], as_index=False)['device2'].agg({'device_day': 'count'})
train_op_feature = pd.merge(train_op_feature, train_device_day, on='UID', how='left')

# # mac1分析的个数

# NOTE(review): removed a leftover notebook inspection expression
# (`train_op_trs[train_op_trs.UID == 10000]`) whose result was computed and discarded.

'''统计用户mac地址的个数'''
# mac1: number of distinct mac1 values per user (NaN coded as 0 first).
train_mac = train_op_trs.copy()
train_mac.mac1 = train_mac.mac1.fillna(0)
train_mac = train_mac.groupby(['UID', 'mac1'], as_index=False)['day'].agg({'mac_count': 'count'})
train_mac['ratio'] = np.where(train_mac.mac1 == 0, 0, 1)  # NOTE(review): computed but never used downstream
train_mac_count = train_mac.groupby('UID', as_index=False)['mac1'].count()
train_op_feature = pd.merge(train_op_feature, train_mac_count, on='UID', how='left')

'''统计mac地址的缺失率'''
# mac_loss: share of records with a NON-null mac1 (groupby.count() skips NaN),
# despite the "loss" name suggesting the missing ratio.
train_mac_loss = train_op_trs.groupby('UID', as_index=False).count()
train_mac_loss['mac_loss'] = train_mac_loss.mac1 / train_mac_loss.day
train_mac_loss = train_mac_loss[['UID', 'mac_loss']]
train_op_feature = pd.merge(train_op_feature, train_mac_loss, on='UID', how='left')

# # weifi分析,统计weifi个数

'''wifi的个数以及每天不同的wifi的个数'''
# wifi: distinct wifi values per user (NaN coded as 0);
# wifi_day: distinct (day, wifi) pairs per user.
train_wifi = train_op_trs.copy()
train_wifi.wifi = train_wifi.wifi.fillna(0)

train_wifi1 = train_wifi.drop_duplicates(subset=['UID', 'wifi'])
train_wifi1 = train_wifi1.groupby('UID', as_index=False).wifi.count()
train_op_feature = pd.merge(train_op_feature, train_wifi1, on='UID', how='left')
'''平均一天的操作系统的个数'''
train_wifi_day = train_wifi.drop_duplicates(subset=['UID', 'day', 'wifi'])
train_wifi_day = train_wifi_day.groupby('UID', as_index=False).wifi.agg({'wifi_day': 'count'})
train_op_feature = pd.merge(train_op_feature, train_wifi_day, on='UID', how='left')

# wefi_loss: fraction of records with a missing wifi value (0 after the fill).
train_is_wefi = train_op_trs.copy()
train_is_wefi.wifi = train_is_wefi.wifi.fillna(0)
train_is_wefi['wefi_loss'] = np.where(train_is_wefi.wifi == 0, 0, 1)

funcs = lambda x: x[x == 0].count() / x.count()
train_is_wefi = train_is_wefi.groupby('UID', as_index=False).agg({'wefi_loss': funcs})
train_op_feature = pd.merge(train_op_feature, train_is_wefi, on='UID', how='left')

# # geo_code地理位置的分析与处理,提取每天地理位置的变化情况，以及地理位置缺失率

'''前两位地理信息是否在wu和wd'''
# Keep only the first two characters of geo_code for the wu/wd prefix flags below.
train_geo_wud = train_op_trs.copy()
train_geo_wud.geo_code = train_geo_wud.geo_code.str[:2]


def fucs_geo_code_wu(x):
    """Group aggregator: 1 when the 2-char geo prefix 'wu' occurs anywhere in the group, else 0."""
    prefixes = x.values.tolist()
    return 1 if 'wu' in prefixes else 0


def fucs_geo_code_wd(x):
    """Group aggregator: 1 when the 2-char geo prefix 'wd' occurs anywhere in the group, else 0."""
    prefixes = x.values.tolist()
    return 1 if 'wd' in prefixes else 0


# geo_code_wu / geo_code_wd: per-user flags for the 'wu' / 'wd' geo prefixes.
train_geo_wu = train_geo_wud.groupby('UID')['geo_code'].apply(fucs_geo_code_wu).reset_index()
train_geo_wu.columns = ['UID', 'geo_code_wu']
train_geo_wd = train_geo_wud.groupby('UID')['geo_code'].apply(fucs_geo_code_wd).reset_index()
train_geo_wd.columns = ['UID', 'geo_code_wd']
train_op_feature = train_op_feature.merge(train_geo_wu, on='UID', how='left')
train_op_feature = train_op_feature.merge(train_geo_wd, on='UID', how='left')

# avg_day_geo: average number of distinct geo_codes per active day (NaN coded as 0).
train_geo = train_op_trs.copy()
train_geo.geo_code = train_geo.geo_code.fillna(0)
train_geo = train_geo.drop_duplicates(subset=['UID', 'day', 'geo_code'], keep='first')
train_geo = train_geo.groupby(['UID', 'day'], as_index=False)['geo_code'].count()
train_geo = train_geo.groupby('UID', as_index=False).agg({'day': 'count', 'geo_code': 'sum'})
train_geo['avg_day_geo'] = train_geo['geo_code'] / train_geo['day']
train_geo.drop(['day', 'geo_code'], axis=1, inplace=True)
train_op_feature = pd.merge(train_op_feature, train_geo, on='UID', how='left')

# geo_loss: fraction of records with a missing geo_code.
train_is_geo = train_op_trs.copy()
train_is_geo.geo_code = train_is_geo.geo_code.fillna(0)
train_is_geo['geo_loss'] = np.where(train_is_geo.geo_code == 0, 0, 1)

funcs = lambda x: x[x == 0].count() / x.count()
train_is_geo = train_is_geo.groupby('UID', as_index=False).agg({'geo_loss': funcs})
train_op_feature = pd.merge(train_op_feature, train_is_geo, on='UID', how='left')

# # 平均化特征

'''对特征进行天数的平均话操作'''
# Normalize the count-style operation features by total op count and by active
# days, then persist the operation feature table.
avg_clos = ['mode_-1', 'mode_1', 'mode_2',
            'mode_3', 'mode_4', 'mode_5', 'mode_6', 'mode_7', 'mode_8', 'mode_9',
            'mode_10', 'mode_11', 'mode_12', 'mode_13', 'mode_14', 'mode_15',
            'success_0', 'success_1', 'success_2', 'time_op_0', 'time_op_1',
            'time_op_2', 'time_op_3', 'os', 'os_day', 'device_count',
            'device_day', 'mac1',
            'wifi', 'wifi_day']

for i in avg_clos:
    train_op_feature['avg_op_{}'.format(i)] = train_op_feature[i] / train_op_feature['op_count']
    train_op_feature['avg_op_day_{}'.format(i)] = train_op_feature[i] / train_op_feature['day_count_op']

# # 特征的保存
train_op_feature.to_csv(features_base_path + 'train_op_feature.csv', index=False, encoding='utf-8')

# Transaction-table analysis and feature extraction (mirrors the operation-table flow above).
import warnings

warnings.filterwarnings('ignore')
from tiancheng.base.base_helper import *

# Load train / round-1 test transaction tables.
train_tst = get_transaction_train_new()
test_tst_rd = get_transaction_round1_new()

# Labels: sentinel -1 for test rows, real Tag merged for train rows.
tag = get_tag_train_new()
# NOTE(review): `tag_hd` presumably comes from the base_helper star import — confirm.
test_tst_rd[tag_hd.Tag] = -1
train_tst = pd.merge(train_tst, tag, on='UID', how='left')

'''合并训练测试集'''
train_tsts = pd.concat([train_tst, test_tst_rd], axis=0).reset_index(drop=True)

# # 数据异常的清除,删除channel为118的

# Drop channel 118 rows (treated as anomalous), then build the per-UID feature skeleton.
train_tsts = train_tsts[train_tsts.channel != 118].reset_index(drop=True)

train_tst_feature = pd.DataFrame({'UID': [i for i in train_tsts.UID.unique()]})
print(train_tst_feature.shape, train_tsts.shape)
train_tst_feature = train_tst_feature.merge(train_tsts.drop_duplicates(['UID', 'Tag'])[['UID', 'Tag']],
                                            on='UID', how='left')
print(train_tst_feature.shape, train_tsts.shape)


# # channel是否为119
def fucs_channel(x):
    """Group aggregator: 1 when channel 119 appears among the group's values, else 0."""
    seen = x.values.tolist()
    return 1 if 119 in seen else 0


# channel_119: whether the user ever transacted through channel 119.
train_channel = train_tsts.groupby('UID')['channel'].apply(fucs_channel).reset_index()

train_channel.columns = ['UID', 'channel_119']
train_tst_feature = train_tst_feature.merge(train_channel, on='UID', how='left')

# # trans_amt, bal的处理

'''交易金额是否大于3000000'''


def fucs_trans_amt(x):
    """Group aggregator: 1 when the group's maximum trans_amt exceeds 3,000,000 (strictly), else 0."""
    biggest = np.max(x.values.tolist())
    return 1 if biggest > 3000000 else 0


# trans_amt_than_119: flag for any transaction over 3,000,000.
# NOTE(review): the "_119" suffix looks like a copy-paste of the channel feature name.
train_trans_amt = train_tsts.groupby('UID')['trans_amt'].apply(fucs_trans_amt).reset_index()
train_trans_amt.columns = ['UID', 'trans_amt_than_119']


def fucs_trans_amt_102(x):
    """Group aggregator: 1 when any trans_amt in the group equals exactly 102, else 0."""
    amounts = x.values.tolist()
    return 1 if 102 in amounts else 0


# trans_amt_102: flag for any transaction of exactly 102; merge both amount flags.
train_trans_amt_102 = train_tsts.groupby('UID')['trans_amt'].apply(fucs_trans_amt_102).reset_index().rename(columns=
    {"trans_amt": 'trans_amt_102'})

train_tst_feature = train_tst_feature.merge(train_trans_amt, on='UID', how='left')
train_tst_feature = train_tst_feature.merge(train_trans_amt_102, on='UID', how='left')


def fucs_bal_big(x):
    """Group aggregator: 1 when the group's maximum bal exceeds 3,200,000 (strictly), else 0."""
    top = np.max(x.values.tolist())
    return 1 if top > 3200000 else 0


# trans_bal_big: flag for any balance over 3,200,000.
train_bal_big = train_tsts.groupby('UID')['bal'].apply(fucs_bal_big).reset_index()

train_bal_big.columns = ['UID','trans_bal_big']

def fucs_bal_100(x):
    """Group aggregator: 1 when any bal in the group equals exactly 100, else 0."""
    balances = x.values.tolist()
    return 1 if 100 in balances else 0


# bal_100: flag for any balance of exactly 100.
train_bal_100 = train_tsts.groupby('UID')['bal'].apply(fucs_bal_100).reset_index().rename(columns=
    {"bal": 'bal_100'})
train_bal_100.columns = ['UID','bal_100']  # NOTE(review): redundant after the rename above

def fucs_bal_100_260(x):
    """Return the fraction of the group's bal values lying strictly between 100 and 260.

    BUG FIX: the original condition was `i > 100 & i < 260`.  Since `&` binds
    tighter than comparisons it parsed as `i > (100 & i) < 260` — a bitwise AND
    followed by a chained comparison — which matched almost every positive
    value.  The intended range test is 100 < i < 260.
    """
    temp = x.values.tolist()
    count = 0
    for i in temp:
        if 100 < i < 260:
            count += 1
    ratio = count / len(temp)
    return ratio


# bal_100_260: fraction of balances strictly between 100 and 260; merge all bal flags.
train_bal_100_260 = train_tsts.groupby('UID')['bal'].apply(fucs_bal_100_260).reset_index()
train_bal_100_260.columns = ["UID",'bal_100_260']
train_tst_feature = train_tst_feature.merge(train_bal_big, on='UID', how='left')
train_tst_feature = train_tst_feature.merge(train_bal_100, on='UID', how='left')
train_tst_feature = train_tst_feature.merge(train_bal_100_260, on='UID', how='left')


# # 是不是1，8以及14，12，以及26，27，28

def _day_flag(target_day):
    """Build a groupby aggregator that returns 1 when *target_day* occurs in the group, else 0.

    Replaces seven copy-pasted fucs_1/fucs_8/.../fucs_28 helpers that differed
    only in the constant being tested.
    """

    def checker(x):
        return 1 if target_day in x.values.tolist() else 0

    return checker


# day_is_<d>: flag whether each user transacted on these specific days of the
# month (1, 8, 12, 14, 26, 27, 28).  Same column names and merge order as the
# original hand-unrolled version.
for _d in (1, 8, 12, 14, 26, 27, 28):
    _flag = (train_tsts.groupby('UID')['day'].apply(_day_flag(_d)).reset_index()
             .rename(columns={"day": 'day_is_{}'.format(_d)}))
    train_tst_feature = train_tst_feature.merge(_flag, on='UID', how='left')

# # 交易天的个数统计,平均每天的操作次数，每个时间段的操作次数
train_tst_day = train_tsts.copy()

'''每个用户的总共交易次数，以及每个用户平均每天的交易次数'''
# trans_count: total transactions; day_count: distinct active days;
# avg_day_trans: transactions per active day.
train_tst_feature = pd.merge(train_tst_feature,
                             train_tst_day.groupby('UID', as_index=False)['day'].agg({'trans_count': 'count'}))
train_tst_day.drop_duplicates(['UID', 'day'], keep='first', inplace=True)
train_tst_day = train_tst_day.groupby('UID', as_index=False)['day'].agg({'day_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_tst_day, on='UID', how='left')

train_tst_feature['avg_day_trans'] = train_tst_feature.trans_count / train_tst_feature.day_count

'''统计每个不同时间段的交易次数，以及每个用户每个时间段的平均交易次数'''
# Reduce HH:MM:SS strings to the integer hour of day.
train_time = train_tsts.copy()
train_time.time = train_time.time.str[:2]

train_time.time = train_time.time.astype('int')


def time_help(data):
    """Bucket an hour-of-day integer into four coarse periods.

    (7, 12] -> 0, (12, 19] -> 1, (19, 24] -> 2, [0, 7] -> 3; anything outside
    [0, 24] yields None, matching the original behavior.
    """
    if 7 < data <= 12:
        return 0
    elif 12 < data <= 19:
        return 1
    elif 19 < data <= 24:
        return 2
    elif 0 <= data <= 7:
        return 3


# Bucket hours, one-hot the buckets, count per user, then derive per-bucket ratios.
train_time.time = train_time.time.map(time_help)
# BUG FIX: the original concat defaulted to axis=0 (row-wise stacking), so the
# UID column and the time dummies never align and the bucket counts collapse to
# zero after the groupby.  Column-wise concat (axis=1) is what is intended.
train_time = pd.concat([train_time.UID, pd.get_dummies(train_time.time, prefix='time')], axis=1)
train_time = train_time.groupby('UID', as_index=False).sum()
train_tst_feature = pd.merge(train_tst_feature, train_time, on='UID', how='left')

# trans_count / time_i is inf for users with no trades in bucket i; map inf to 0.
time_func = lambda x: 0 if x == np.inf else x
train_tst_feature['avg_time0_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_0).map(time_func)
train_tst_feature['avg_time1_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_1).map(time_func)
train_tst_feature['avg_time2_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_2).map(time_func)
train_tst_feature['avg_time3_trans'] = (train_tst_feature.trans_count / train_tst_feature.time_3).map(time_func)

# # 取每个用户的相邻两次交易天数差值的平均值，最大值，最大值

# Gaps between consecutive activity: per user, sort the de-duplicated
# (day, time) stamps and diff each row against the next one.
train_time_sub = train_tsts.copy()
train_time_sub.drop_duplicates(['UID', 'day', 'time'], inplace=True)
train_time_sub.sort_values(by=['UID', 'day', 'time'], inplace=True)

# day_shift: next day within the same UID; time_shift: next time within the same (UID, day).
train_time_sub['day_shift'] = train_time_sub.groupby('UID')['day'].shift(-1)
train_time_sub['time_shift'] = train_time_sub.groupby(['UID', 'day'])['time'].shift(-1)

train_time_sub = train_time_sub[['UID', 'day', 'time', 'day_shift', 'time_shift']]
train_time_sub['day_sub'] = train_time_sub['day_shift'] - train_time_sub['day']
# time_sub: seconds between consecutive events on the same day.
train_time_sub['time_sub'] = (pd.to_datetime(train_time_sub.time_shift, format='%H:%M:%S')
                              - pd.to_datetime(train_time_sub.time, format='%H:%M:%S')).dt.total_seconds()
train_day_sub = train_time_sub.copy()
train_day_sub['day_sub'] = train_time_sub['day_sub'].fillna(0)
# Per-user mean/max/min of the day gaps.
train_day_sub = train_day_sub.groupby('UID', as_index=False)['day_sub'].agg({'mean', 'max', 'min'}).add_prefix(
    'day_sub_').reset_index()
train_tst_feature = pd.merge(train_tst_feature, train_day_sub, on='UID', how='left')

train_time_sub = train_time_sub[train_time_sub.time_sub.notnull()]

# Per-user mean/max/min of the intra-day second gaps (0 when a user has none).
train_time_sub = train_time_sub.groupby('UID', as_index=False)['time_sub'].agg({'mean', 'max', 'min'}).add_prefix(
    'time_sub_').reset_index()
train_tst_feature = pd.merge(train_tst_feature, train_time_sub, on='UID', how='left')
train_tst_feature[['time_sub_mean', 'time_sub_min', 'time_sub_max']] = train_tst_feature[['time_sub_mean',
                                                                                          'time_sub_min',
                                                                                          'time_sub_max']].fillna(0)

# # 交易平台的个数，以及每个平台的交易金额的大小trans_amt的统计信息值

'''交易平台的个数以及平均每天使用不同平台的个数'''
# channel_count: distinct channels used per user.
train_channel_count = train_tsts.copy()
train_channel_count.drop_duplicates(['UID', 'channel'], inplace=True)
train_channel_count = train_channel_count.groupby('UID', as_index=False)['channel'].agg({'channel_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_channel_count, on='UID', how='left')

'''平均每天使用的平台个数'''
# channel_day_count: distinct (day, channel) pairs per user.
train_avg_channel = train_tsts.copy()
train_avg_channel.drop_duplicates(['UID', 'day', 'channel'], inplace=True)
train_avg_channel = train_avg_channel.groupby('UID', as_index=False)['channel'].agg({'channel_day_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_avg_channel, on='UID', how='left')

'''每个平台的交易金额的统计信息值'''
# Per-channel trans_amt stats (mean/max/min), pivoted wide to amt_channel_<stat>_<channel>.
train_channel_amt = train_tsts.copy()
train_channel_amt = train_channel_amt.groupby(['UID', 'channel'])['trans_amt'].agg({'mean', 'max', 'min'}).add_prefix(
    'amt_channel_').unstack('channel')
train_channel_amt.columns = [x[0] + "_" + str(x[1]) for x in train_channel_amt.columns.ravel()]
train_channel_amt.fillna(0, inplace=True)
train_channel_amt.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_channel_amt, on='UID', how='left')

# # 交易金额和余额的大小trans_amt，bal的统计分析

'''每个用户的钱的大小'''
# trans_amt_all / bal_all: per-user totals of transaction amount and balance.
train_tst_feature = train_tst_feature.merge(train_tsts.groupby('UID', as_index=False)['trans_amt']
                                            .agg({'trans_amt_all': 'sum'}), on='UID', how='left')
'''每个用户的余额大小'''
train_tst_feature = train_tst_feature.merge(train_tsts.groupby('UID', as_index=False)['bal']
                                            .agg({'bal_all': 'sum'}), on='UID', how='left')

# # 营销活动的相关特征，每天参与营销活动的类型，一共参与的营销活动，每个营销类型的金额相关特征

'''平均每天参与的营销活动次数'''
# mark_code_day: distinct (day, market_code) pairs per user (NaN coded as 0).
train_market_code = train_tsts.copy()
train_market_code['market_code'].fillna(0, inplace=True)
train_market_code.drop_duplicates(['UID', 'day', 'market_code'], inplace=True)
train_market_code = train_market_code.groupby('UID', as_index=False)['market_code'].agg({'mark_code_day': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_market_code, on='UID', how='left')

'''平均每天参与不同营销活动的次数'''
# mark_code_count: distinct market_codes per user.
train_market_code_diff = train_tsts.copy()
train_market_code_diff['market_code'].fillna(0, inplace=True)
train_market_code_diff.drop_duplicates(['UID', 'market_code'], inplace=True)
train_market_code_diff = train_market_code_diff.groupby('UID', as_index=False)['market_code'].agg(
    {'mark_code_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_market_code_diff, on='UID', how='left')

'''不同的营销类型的金额统计信息'''
# Per-market_type trans_amt stats, pivoted wide to market_type_<stat>_<type>.
train_market_type = train_tsts.copy()
train_market_type['market_type'].fillna(0, inplace=True)
train_market_type = train_market_type.groupby(['UID', 'market_type'])['trans_amt'].agg(
    {'mean', 'max', 'min'}).add_prefix('market_type_').unstack()
train_market_type.columns = [x[0] + "_" + str(x[1]) for x in train_market_type.columns.ravel()]
train_market_type.fillna(0, inplace=True)
train_market_type.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_market_type, on='UID', how='left')

# # 交易类型的统计分析以及和交易金额的大小之间的关系信息

'''平均每天交易类型1,2的次数'''
# trans_type1_count / trans_type2_count: distinct (day, type) pairs per user.
train_type_1 = train_tsts.copy()
train_type_1.drop_duplicates(['UID', 'day', 'trans_type1'], inplace=True)
train_type_1 = train_type_1.groupby('UID', as_index=False)['trans_type1'].agg({'trans_type1_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_type_1, on='UID', how='left')

train_type_2 = train_tsts.copy()
train_type_2.drop_duplicates(['UID', 'day', 'trans_type2'], inplace=True)
train_type_2 = train_type_2.groupby('UID', as_index=False)['trans_type2'].agg({'trans_type2_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_type_2, on='UID', how='left')

'''每个用户的交类型2的金额统计信息'''
# trans_type_loss: fraction of records whose trans_type2 was missing (filled to 0 above).
train_type2_loss = train_tsts.copy()
train_type2_loss['trans_type2'].fillna(0, inplace=True)
train_type2_loss = train_type2_loss.groupby(['UID'], as_index=False)['trans_type2'].agg({
    'trans_type_loss': lambda x: x[x == 0].count() / x.count()})
train_tst_feature = train_tst_feature.merge(train_type2_loss, on='UID', how='left')

# # acc_id1分析

'''用户帐号相关accid1'''
# acc_id1: missing ratio, distinct count, and distinct (day, acc_id1) count per user.
train_tst_accid1 = train_tsts.copy()
train_tst_accid1.fillna(0, inplace=True)
train_tst_feature = train_tst_feature.merge(train_tst_accid1.groupby('UID', as_index=False)['acc_id1'].agg({
    'acc_id1_loss': lambda x: x[x == 0].count() / x.count()
}))
train_tst_accid1.drop_duplicates(['UID', 'acc_id1'], inplace=True)
train_tst_accid1 = train_tst_accid1.groupby('UID', as_index=False)['acc_id1'].agg({'acc_id1_count': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid1, on='UID', how='left')

train_tst_accid1_day = train_tsts.copy()
train_tst_accid1_day.fillna(0, inplace=True)
train_tst_accid1_day.drop_duplicates(['UID', 'day', 'acc_id1'], inplace=True)
train_tst_accid1_day = train_tst_accid1_day.groupby('UID', as_index=False)['acc_id1'].agg({'acc_id1_day': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid1_day, on='UID', how='left')

'''用户帐号相关accid2'''
# acc_id2: same distinct counts (no missing-ratio feature for this column).
train_tst_accid2 = train_tsts.copy()
train_tst_accid2.fillna(0, inplace=True)
train_tst_accid2.drop_duplicates(['UID', 'acc_id2'], inplace=True)
train_tst_accid2 = train_tst_accid2.groupby('UID', as_index=False)['acc_id2'].agg({'acc_id2_count': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid2, on='UID', how='left')

train_tst_accid2_day = train_tsts.copy()
train_tst_accid2_day.fillna(0, inplace=True)
train_tst_accid2_day.drop_duplicates(['UID', 'day', 'acc_id2'], inplace=True)
train_tst_accid2_day = train_tst_accid2_day.groupby('UID', as_index=False)['acc_id2'].agg({'acc_id2_day': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid2_day, on='UID', how='left')

'''用户帐号相关accid3'''
# acc_id3: same distinct counts as acc_id2.
train_tst_accid3 = train_tsts.copy()
train_tst_accid3.fillna(0, inplace=True)
train_tst_accid3.drop_duplicates(['UID', 'acc_id3'], inplace=True)
train_tst_accid3 = train_tst_accid3.groupby('UID', as_index=False)['acc_id3'].agg({'acc_id3_count': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid3, on='UID', how='left')

train_tst_accid3_day = train_tsts.copy()
train_tst_accid3_day.fillna(0, inplace=True)
train_tst_accid3_day.drop_duplicates(['UID', 'day', 'acc_id3'], inplace=True)
train_tst_accid3_day = train_tst_accid3_day.groupby('UID', as_index=False)['acc_id3'].agg({'acc_id3_day': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_accid3_day, on='UID', how='left')

# # 交易资金类型amt_src1

# amt_src1: per-user non-null count; amt_src_count: distinct (day, amt_src1) pairs.
train_amt_src = train_tsts.copy()
train_tst_feature = pd.merge(train_tst_feature, train_amt_src.groupby('UID', as_index=False)['amt_src1'].count())
train_amt_src.drop_duplicates(['UID', 'day', 'amt_src1'], inplace=True)
train_amt_src = train_amt_src.groupby('UID', as_index=False)['amt_src1'].agg({'amt_src_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_amt_src, on='UID', how='left')

# # 商家的信息统计方法与分析，merchant, code1, code2
'''商家的个数，平均每天逛商家的个数'''
# merchant: per-user non-null count; merchant_count: distinct (day, merchant) pairs.
train_merchant = train_tsts.copy()
train_tst_feature = pd.merge(train_tst_feature, train_merchant.groupby('UID', as_index=False)['merchant'].count())
train_merchant.drop_duplicates(['UID', 'day', 'merchant'], inplace=True)
train_merchant = train_merchant.groupby('UID', as_index=False)['merchant'].agg({'merchant_count': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_merchant, on='UID', how='left')

# Per-merchant spend aggregates (sum/mean and implied per-merchant trade count),
# rolled up to per-user mean/max/min/sum statistics.
# BUG FIX: the original grouped `train_tst` (the train-only table) instead of
# the merged `train_tsts`, so every test-set UID got NaN for all merchant
# aggregate features (and for the avg_trans_* features derived from them).
temp1 = train_tsts.groupby(['UID', 'merchant'], as_index=False)['trans_amt'].agg({'sum', 'mean'}).add_prefix(
    'merchant_').reset_index()
temp1['trans_mer_number'] = temp1['merchant_sum'] / temp1['merchant_mean']  # == trade count at that merchant
temp1 = temp1.groupby('UID').agg({'mean', 'max', 'min', 'sum'})
temp1.columns = [x[0] + '_' + x[1] for x in temp1.columns.ravel()]
temp1.reset_index(inplace=True)
train_tst_feature = pd.merge(train_tst_feature, temp1, on='UID', how='left')

# NOTE(review): removed a leftover notebook inspection expression
# (`train_tst_feature.columns`) whose result was computed and discarded.

# # 交易表中地理位置信息分析
'''平均每天变动的地理位置'''
# geo_code_count: distinct geo_codes per user (NaN coded as 0 so a missing
# location still counts as one value).
train_tst_geo = train_tsts.copy()
train_tst_geo.fillna(0, inplace=True)
train_tst_geo.drop_duplicates(['UID', 'geo_code'], inplace=True)
train_tst_geo = train_tst_geo.groupby('UID', as_index=False)['geo_code'].agg({'geo_code_count': 'count'})
train_tst_feature = train_tst_feature.merge(train_tst_geo, on='UID', how='left')

# geo_code_day: distinct (day, geo_code) pairs per user.
train_tst_geo_day = train_tsts.copy()
# BUG FIX: the original called `.fillna(0)` without assigning the result, so the
# fill was a silent no-op and NaN geo_codes were excluded from the count
# (inconsistent with the operation-table version, which does fill them).
train_tst_geo_day.geo_code = train_tst_geo_day.geo_code.fillna(0)
train_tst_geo_day = train_tst_geo_day.drop_duplicates(subset=['UID', 'day', 'geo_code'], keep='first')
train_tst_geo_day = train_tst_geo_day.groupby(['UID'], as_index=False)['geo_code'].agg({'geo_code_day': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_tst_geo_day, on='UID', how='left')

# # 交易表的设备特征提取

'''一个用户用了几个设置进行交易, 以及没多少次交易换设备，以及平均每天设备个数'''
# device_tst: total records per user; device_count_tst: distinct (day, device2) pairs.
train_device = train_tsts.copy()
train_device.device2 = train_device.device2.fillna(0)
train_tst_feature = pd.merge(train_tst_feature,
                             train_device.groupby('UID', as_index=False)['device2'].agg({'device_tst': 'count'}))
train_device.drop_duplicates(['UID', 'day', 'device2'], inplace=True)
train_device = train_device.groupby(['UID'], as_index=False)['device2'].agg({'device_count_tst': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_device, on='UID', how='left')

# # mac1地址的统计分析
'''统计用户mac地址的个数'''
# mac1_tst: distinct mac1 per user (NaN coded as 0);
# mac1_day: distinct (day, mac1) pairs per user.
train_mac = train_tsts.copy()
train_mac.mac1 = train_mac.mac1.fillna(0)

train_mac.drop_duplicates(['UID', 'mac1'], inplace=True)
train_mac = train_mac.groupby('UID', as_index=False)['mac1'].agg({'mac1_tst': 'count'})
train_tst_feature = train_tst_feature.merge(train_mac, on='UID', how='left')

train_mac_day = train_tsts.copy()
train_mac_day.mac1.fillna(0, inplace=True)
train_mac_day.drop_duplicates(['UID', 'day', 'mac1'], inplace=True)
train_mac_day = train_mac_day.groupby('UID', as_index=False)['mac1'].agg({'mac1_day': 'count'})
train_tst_feature = train_tst_feature.merge(train_mac_day, on='UID', how='left')

# # 交易表中交易是否为苹果手机
# Apple-device heuristic for the transaction table.
# NOTE(review): presumably device_code3 marks Apple devices — confirm against the data dictionary.
train_apple = train_tsts.copy()
train_apple.device_code3 = train_apple.device_code3.fillna(0)
train_apple.device_code1 = train_apple.device_code1.fillna(0)

# Binarize: 0 = missing, 1 = present.
train_apple.device_code3 = train_apple.device_code3.apply(lambda x: x if x == 0 else 1)
train_apple.device_code1 = train_apple.device_code1.apply(lambda x: x if x == 0 else 1)

train_apple = train_apple.groupby('UID', as_index=False)[['device_code1', 'device_code3']].sum()
train_apple['is_apple_tst'] = np.where(train_apple.device_code1 > train_apple.device_code3, 0, 1)
# is_apple_an_tst: user has records with both kinds of device codes.
train_apple['is_apple_an_tst'] = np.where((train_apple.device_code1 != 0) & (train_apple.device_code3 != 0), 1, 0)
train_apple.drop(['device_code1', 'device_code3'], axis=1, inplace=True)
train_tst_feature = pd.merge(train_tst_feature, train_apple, on='UID', how='left')

# # ip地址信息的相关统计分析ip1,ip1_sub
'''平均每天换多少个ip'''
# ip1_loss: fraction of records with a missing ip1 (0 after the fill);
# ip1_day_tst: distinct (day, ip1) pairs per user.
train_tst_ip = train_tsts.copy()
train_tst_ip['ip1'] = train_tst_ip['ip1'].fillna(0)
train_tst_feature = train_tst_feature.merge(train_tst_ip.groupby('UID', as_index=False)['ip1'].agg({
    'ip1_loss': lambda x: x[x == 0].count() / x.count()
}))

train_tst_ip.drop_duplicates(['UID', 'day', 'ip1'], inplace=True)
train_tst_ip = train_tst_ip.groupby('UID', as_index=False)['ip1'].agg({'ip1_day_tst': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_tst_ip, on='UID', how='left')

'''每多少次交易换一次ip'''
# ip1_count_tst: total record count per user (ip1 pre-filled so nothing is dropped).
train_ip_trans = train_tsts.copy()
train_ip_trans['ip1'] = train_ip_trans['ip1'].fillna(0)
train_ip_trans = train_ip_trans.groupby('UID', as_index=False)['ip1'].agg({'ip1_count_tst': 'count'})
train_tst_feature = pd.merge(train_tst_feature, train_ip_trans, on='UID', how='left')

# # 平均化特征处理

# Normalize count-style transaction features by total transactions and by active days.
cols1 = ['channel_count', 'channel_day_count', 'amt_channel_mean_102',
         'amt_channel_mean_106', 'amt_channel_mean_118', 'amt_channel_mean_119',
         'trans_amt_all', 'bal_all', 'mark_code_day', 'mark_code_count', 'market_type_mean_0.0',
         'market_type_mean_1.0', 'market_type_mean_2.0', 'trans_type2_count', 'acc_id1_count',
         'acc_id1_day', 'acc_id2_count', 'acc_id2_day', 'acc_id3_count',
         'acc_id3_day', 'amt_src1', 'amt_src_count', 'merchant', 'merchant_count',
         'merchant_mean_mean', 'merchant_mean_sum',
         'merchant_mean_max', 'merchant_mean_min', 'merchant_sum_mean',
         'merchant_sum_sum', 'merchant_sum_max', 'merchant_sum_min',
         'trans_mer_number_mean', 'trans_mer_number_sum', 'trans_mer_number_max',
         'trans_mer_number_min', 'geo_code_count', 'geo_code_day', 'device_tst',
         'device_count_tst', 'mac1_tst', 'mac1_day', 'ip1_day_tst', 'ip1_count_tst'
         ]
for i in cols1:
    train_tst_feature['avg_trans_{}'.format(i)] = train_tst_feature[i] / train_tst_feature['trans_count']
    train_tst_feature['avg_trans_day_{}'.format(i)] = train_tst_feature[i] / train_tst_feature['day_count']

# Overall amount-to-balance ratio per user.
train_tst_feature['avg_trans_mat_bal'] = train_tst_feature['trans_amt_all'] / train_tst_feature['bal_all']

# Ratio features: total amount / total balance divided by various per-user counts,
# then persist the transaction feature table.
cols2 = ['channel_count', 'channel_day_count', 'mark_code_day', 'mark_code_count',
         'trans_type1_count', 'amt_src1', 'amt_src_count', 'merchant',
         'merchant_count', 'device_tst', 'device_count_tst', 'trans_type2_count']

# BUG FIX: the loop body used `i` (left over from the previous loop) instead of
# the loop variable `j`, so all twelve iterations overwrote the same two
# columns instead of producing one pair of ratio features per entry in cols2.
for j in cols2:
    train_tst_feature['avg_trans_amt_{}'.format(j)] = train_tst_feature['trans_amt_all'] / train_tst_feature[j]
    train_tst_feature['avg_trans_bal_{}'.format(j)] = train_tst_feature['bal_all'] / train_tst_feature[j]

train_tst_feature.to_csv(features_base_path + 'train_tst_feature.csv', index=False, encoding='utf-8')
