#!/usr/bin/env python
# coding=utf-8
# Filename : Book.py
# Created by iFantastic on 2017/7/23
# Description : Examples from the book "Python 大战机器学习" (Python vs. Machine Learning)
import pandas as pd
import numpy as np


# Data cleaning: load the three raw CSV files and index them by people_id
# so each activity row can be joined onto the matching person's attributes.
people = pd.read_csv('input/people.csv', sep=',', header=0, keep_default_na=True, parse_dates=['date'])
people.set_index(keys=['people_id'], drop=True, append=False, inplace=True)
act_train = pd.read_csv('input/act_train.csv', sep=',', header=0, keep_default_na=True, parse_dates=['date'])
act_train.set_index(keys=['people_id'], drop=True, append=False, inplace=True)
# BUG FIX: the test activities were loaded from act_train.csv (copy-paste
# error), which silently made the "test" set a duplicate of the train set.
act_test = pd.read_csv('input/act_test.csv', sep=',', header=0, keep_default_na=True, parse_dates=['date'])
act_test.set_index(keys=['people_id'], drop=True, append=False, inplace=True)

# Left-join each activity onto its person's attributes; column names shared
# by both tables get the _act / _people suffixes.
train_data = act_train.merge(people, how='left', left_index=True, right_index=True, suffixes=('_act', '_people'))
# BUG FIX: test_data was built from act_train instead of act_test.
test_data = act_test.merge(people, how='left', left_index=True, right_index=True, suffixes=('_act', '_people'))

# Type 1 activities carry different columns than non-type-1 activities
# (see onehot_encoder below: char_1..char_9_act vs char_10_act), so split
# the merged data into one DataFrame per activity_category value.
types = ['type %d' % i for i in range(1, 8)]
train_datas = {}
test_datas = {}
for _type in types:
    # Drop all-NaN rows, then all-NaN columns.  Two sequential calls replace
    # the tuple form dropna(axis=(0, 1)), which was removed from pandas;
    # the result is identical.
    train_datas[_type] = (train_data[train_data.activity_category == _type]
                          .dropna(axis=0, how='all').dropna(axis=1, how='all'))
    test_datas[_type] = (test_data[test_data.activity_category == _type]
                         .dropna(axis=0, how='all').dropna(axis=1, how='all'))
    # activity_category is constant within each subset -> redundant, drop it.
    train_datas[_type].drop(['activity_category'], axis=1, inplace=True)
    test_datas[_type].drop(['activity_category'], axis=1, inplace=True)
    # Build the unique (people_id, activity_id) MultiIndex.
    train_datas[_type].set_index(keys=['activity_id'], drop=True, append=True, inplace=True)
    test_datas[_type].set_index(keys=['activity_id'], drop=True, append=True, inplace=True)

# Type conversion: turn every remaining column into float64 so later one-hot
# encoding / scaling can operate on a purely numeric matrix.
# Bracket indexing is used throughout (instead of the original attribute
# assignment df.date_act = ...) — it is the documented way to assign columns
# and matches the style of the loops below.
str_col_list = ['group_1']+['char_%d_act' %i for i in range(1, 11)]+['char_%d_people' %i for i in range(1, 10)]
bool_col_list = ['char_10_people']+['char_%d' %i for i in range(11, 38)]
for _type in types:
    for data_set in [train_datas, test_datas]:
        # Dates -> fractional days since the Unix epoch.
        data_set[_type]['date_act'] = (data_set[_type]['date_act'] - np.datetime64('1970-01-01')) / np.timedelta64(1, 'D')
        data_set[_type]['date_people'] = (data_set[_type]['date_people'] - np.datetime64('1970-01-01')) / np.timedelta64(1, 'D')
        # e.g. 'group 17304' -> 17304.0
        data_set[_type]['group_1'] = data_set[_type]['group_1'].str.replace('group', '').str.strip().astype(np.float64)
        # Boolean columns -> 0.0 / 1.0.
        for col in bool_col_list:
            if col in data_set[_type]:
                data_set[_type][col] = data_set[_type][col].astype(np.float64)
        # e.g. 'type 12' -> 12.0 (group_1, index 0 of str_col_list, was
        # already handled above, hence the [1:] slice).
        for col in str_col_list[1:]:
            if col in data_set[_type]:
                data_set[_type][col] = data_set[_type][col].str.replace('type', '').str.strip().astype(np.float64)
        # Final pass: everything else (e.g. outcome) becomes float64 too.
        data_set[_type] = data_set[_type].astype(np.float64)


# Data exploration helpers: per-column summaries of distinct values.
# Proper defs instead of name-bound lambdas (PEP 8 / E731); each also calls
# x.unique() only once instead of up to three times.
def lambda_len(x):
    """Return the number of distinct values in the Series *x*."""
    return len(x.unique())


def lambda_data(x):
    """Return a short preview of *x*'s distinct values.

    The full unique array when there are at most 3 distinct values,
    otherwise the first 3 followed by '...'.
    """
    uniques = x.unique()
    if len(uniques) <= 3:
        return str(uniques)
    return str(uniques[:3]) + '...'
# For every activity type, build a two-column overview DataFrame
# (distinct-value count + preview) indexed by column name, keyed by the
# type's digit ('type 3' -> '3').
train_results = {}
test_results = {}
for _type in types:
    digit = _type[-1]
    for source, target in ((train_datas, train_results), (test_datas, test_results)):
        subset = source[_type]
        target[digit] = pd.DataFrame(
            {'len': subset.apply(lambda_len), 'data': subset.apply(lambda_data)},
            index=subset.columns)

# Merge the per-type column summaries pairwise (outer join on column name)
# so related activity types can be compared side by side.
train_12 = train_results['1'].merge(train_results['2'], how='outer', left_index=True, right_index=True, suffixes=('_ta_1', '_ta_2'))
train_34 = train_results['3'].merge(train_results['4'], how='outer', left_index=True, right_index=True, suffixes=('_ta_3', '_ta_4'))
train_56 = train_results['5'].merge(train_results['6'], how='outer', left_index=True, right_index=True, suffixes=('_ta_5', '_ta_6'))
# BUG FIX: the suffixes here were '_ta_5'/'_ta_6' (copy-pasted from the line
# above), although this frame pairs type-7 TRAIN with type-7 TEST.
train_test77 = train_results['7'].merge(test_results['7'], how='outer', left_index=True, right_index=True, suffixes=('_ta_7', '_tt_7'))
# Consistency fix: the second test suffix had a stray double underscore
# ('__tt_2' etc.); these names only appear in the summary frame below.
test_12 = test_results['1'].merge(test_results['2'], how='outer', left_index=True, right_index=True, suffixes=('_tt_1', '_tt_2'))
test_34 = test_results['3'].merge(test_results['4'], how='outer', left_index=True, right_index=True, suffixes=('_tt_3', '_tt_4'))
test_56 = test_results['5'].merge(test_results['6'], how='outer', left_index=True, right_index=True, suffixes=('_tt_5', '_tt_6'))

# BUG FIX: this combined overview was computed and immediately discarded
# (a no-op expression statement); bind it so it can actually be inspected.
summary = train_12.merge(train_34, how='outer', left_index=True, right_index=True)\
    .merge(train_56, how='outer', left_index=True, right_index=True)\
    .merge(train_test77, how='outer', left_index=True, right_index=True)\
    .merge(test_12, how='outer', left_index=True, right_index=True)\
    .merge(test_34, how='outer', left_index=True, right_index=True)\
    .merge(test_56, how='outer', left_index=True, right_index=True)


# 独热码编码，增加特征数量
from scipy.sparse import hstack, csr_matrix
from sklearn.preprocessing import OneHotEncoder


def onehot_encoder(train_datas, test_datas):
    """One-hot encode the low-cardinality categorical columns of each
    per-type DataFrame.

    Returns (train_results, test_results): dicts mapping 'type N' to a
    scipy sparse matrix laid out as
        [one-hot block | remaining middle columns | end columns],
    where the train end columns finish with 'outcome'.
    """
    train_results = {}
    test_results = {}
    types = ['type %d' %i for i in range(1, 8)]
    for _type in types:
        if _type == 'type 1':
            # Type 1 activities carry their own char_1..char_9_act columns.
            one_hot_cols = ['char_%d_act' %i for i in range(1, 10)]+['char_%d_people' %i for i in range(1, 10)]
            train_end_cols = ['group_1', 'date_act', 'date_people', 'char_38', 'outcome']
            test_end_cols = ['group_1', 'date_act', 'date_people', 'char_38']
        else:
            one_hot_cols = ['char_%d_people' %i for i in range(1, 10)]
            train_end_cols = ['group_1', 'char_10_act', 'date_act', 'date_people', 'char_38', 'outcome']
            test_end_cols = ['group_1', 'char_10_act', 'date_act', 'date_people', 'char_38']

        train_front_array = train_datas[_type][one_hot_cols].values  # columns to one-hot encode
        train_end_array = train_datas[_type][train_end_cols].values  # kept as-is (incl. outcome)
        # BUG FIX: '.values' was missing here, unlike the other five arrays.
        train_middle_array = train_datas[_type].drop(train_end_cols+one_hot_cols, axis=1, inplace=False).values

        test_front_array = test_datas[_type][one_hot_cols].values  # columns to one-hot encode
        test_end_array = test_datas[_type][test_end_cols].values  # kept as-is
        test_middle_array = test_datas[_type].drop(test_end_cols+one_hot_cols, axis=1, inplace=False).values

        # NOTE(review): categorical_features is a deprecated/removed
        # OneHotEncoder parameter — this requires an old scikit-learn.
        encoder = OneHotEncoder(categorical_features='all', sparse=True)
        # BUG FIX: the test set was previously encoded with fit_transform,
        # i.e. a refit encoder, which can yield a different number/order of
        # one-hot columns than the train set.  Fit once on train and reuse
        # the fitted encoder (assumes test categories are a subset of train
        # categories — plausible since char_*_people come from the shared
        # people table; TODO confirm).
        train_result = hstack([encoder.fit_transform(train_front_array), csr_matrix(train_middle_array), csr_matrix(train_end_array)])
        test_result = hstack([encoder.transform(test_front_array), csr_matrix(test_middle_array), csr_matrix(test_end_array)])
        train_results[_type] = train_result
        test_results[_type] = test_result
    # BUG FIX: the function used to return train_datas (its unmodified
    # input) instead of the encoded train_results.
    return train_results, test_results

def _report_shapes(header, train_map, test_map):
    # Print the per-type train/test matrix shapes under the given header.
    print(header)
    for _type in types:
        print('train(type=%s):shape=' % _type, train_map[_type].shape)
        print('test(type=%s):shape=' % _type, test_map[_type].shape)
    print('===============\n\n')


_report_shapes('before encode:\n', train_datas, test_datas)
train_results, test_results = onehot_encoder(train_datas, test_datas)
_report_shapes('after encode:\n', train_results, test_results)

# 归一化处理
from sklearn.preprocessing import MaxAbsScaler


def scale(train_datas, test_datas):
    """Max-abs scale the trailing dense columns of each per-type matrix.

    The leading one-hot columns are already 0/1 and are left untouched; only
    the trailing dense columns (group_1, dates, char_38, ...) are scaled.
    The scaler is fitted on the train columns excluding 'outcome' and reused
    on the corresponding test columns; 'outcome' is passed through unscaled.

    NOTE(review): inputs are converted with np.array(...), which does NOT
    densify scipy sparse matrices such as those produced by onehot_encoder —
    callers must pass dense array-likes (or .toarray() first). TODO confirm.
    """
    train_results = {}
    test_results = {}
    types = ['type %d' %i for i in range(1, 8)]

    for _type in types:
        if _type == 'type 1':
            train_last_index = 5  # last 5 cols: group_1/date_act/date_people/char_38/outcome
            test_last_index = 4  # last 4 cols: group_1/date_act/date_people/char_38
        else:
            train_last_index = 6  # last 6 cols: group_1/char_10_act/date_act/date_people/char_38/outcome
            # BUG FIX: this branch never set test_last_index, so the stale
            # value (4) from the 'type 1' branch was silently reused.
            test_last_index = 5  # last 5 cols: group_1/char_10_act/date_act/date_people/char_38

        scaler = MaxAbsScaler()
        train_array = np.array(train_datas[_type])
        train_front = train_array[:, :-train_last_index]
        # BUG FIX: the slice previously ended at absolute column 1
        # ('-train_last_index:1'), which selects no columns; it must stop
        # just before the last column so 'outcome' is excluded from scaling.
        train_mid = scaler.fit_transform(train_array[:, -train_last_index:-1])
        train_end = train_array[:, -1].reshape((-1, 1))  # outcome, unscaled
        train_results[_type] = np.hstack((train_front, train_mid, train_end))

        test_array = np.array(test_datas[_type])
        test_front = test_array[:, :-test_last_index]
        # Reuse the train-fitted scaler on the matching test columns.
        test_end = scaler.transform(test_array[:, -test_last_index:])
        test_results[_type] = np.hstack((test_front, test_end))
    return train_results, test_results

# Sanity check: print each row's distinct max/min after scaling for every
# activity type, for train and test alike.
ta_results, tt_results = scale(train_results, test_results)
for _type in types:
    for label, results in (('Train', ta_results), ('Test', tt_results)):
        per_row_max = np.unique(results[_type].max(axis=1))
        per_row_min = np.unique(results[_type].min(axis=1))
        print('%s(type=%s):' % (label, _type), per_row_max, per_row_min)