#!/usr/bin/env python
# -*- coding:utf-8 -*-
# @Time    : 2018/11/18 18:50
# @Author  : liujiantao
# @Site    :
# @File    : operation_ftr_extract.py
# @Software: PyCharm
import os
from tiancheng.base.base_helper import *

# Output path for the extracted feature frame and input path for the
# merged raw frame (features_base_path comes from the base_helper import).
ftr_path = features_base_path + "tst_merge_ftr02.pkl"
tst_merge_path = features_base_path + "tst_merge.pkl"
print(ftr_path)
tst_merge = pd.read_pickle(tst_merge_path)
# Earlier preprocessing experiments, kept for reference:
# tst_merge = filed2encode_f(tst_merge,tr_hd.merchant)
# tst_merge = filed2encode_f(tst_merge,tr_hd.code1)
# Forward fill: take the previous row's value; axis=1 would fill by column.
# tst_merge = tst_merge.fillna(method="ffill")
# # Backward fill: take the next row's value; leaves NaN when none exists.
# tst_merge = tst_merge.fillna(method="bfill")
# tst_merge.fillna(-1, inplace=True)  # remaining NaNs default to -1
# pd.to_pickle(tst_merge,tst_merge_path)

# Distinct UIDs in the merged frame (order is arbitrary via set()).
UID_LIST = list(set(tst_merge[tag_hd.UID].tolist()))
print(len(UID_LIST))  # 60643 observed for this data set

ftr_list = []


def outliers_detection02(data=None):
    """
    Outlier detection: drop rows an IsolationForest flags as anomalous.

    :param data: DataFrame holding numeric feature columns plus the
        ``tag_hd.UID`` and ``tag_hd.Tag`` label columns.
    :return: DataFrame of the kept rows (the regular points, or the
        outliers themselves when *everything* was flagged), with the
        UID/Tag columns re-attached.  On any failure the input ``data``
        is returned unchanged — the original bare ``except`` fell
        through to ``return df`` and could raise UnboundLocalError.
    """
    from sklearn import preprocessing
    from sklearn.ensemble import IsolationForest
    try:
        # Set the label columns aside; they are re-attached after filtering.
        tag_uid = data[tag_header]
        data = data.drop_duplicates()
        sample_size = data.shape[0]

        data = data.reset_index(drop=True)
        data = data.astype('float64')
        # The labels must not feed the outlier model.
        data.pop(tag_hd.Tag)
        data.pop(tag_hd.UID)
        normalized_data = preprocessing.StandardScaler().fit_transform(data)
        rng = np.random.RandomState(42)
        outliers_fraction = 0.001  # expected share of anomalous samples
        model = IsolationForest(max_samples=sample_size, random_state=rng,
                                contamination=outliers_fraction)
        model.fit(normalized_data)
        detection = model.predict(normalized_data)  # -1 = outlier, 1 = regular
        outliers = list(np.where(detection == -1)[0])
        regular = list(np.where(detection == 1)[0])
        outliers_rate = len(outliers) / sample_size
        # NOTE(review): `tag_uid` keeps its pre-dedup index while the
        # positions below come from the reset frame — confirm alignment
        # holds for de-duplicated inputs.
        if len(regular) <= 0:
            # Everything was flagged: keep the outliers rather than
            # returning an empty frame.
            df = data.loc[outliers]
            tag_uid = tag_uid.loc[outliers]
        else:
            df = data.loc[regular]
            tag_uid = tag_uid.loc[regular]
        if df.shape[0] <= 1:
            # Downstream code needs more than one row; pad via add_row.
            df = add_row(df)
        df[tag_hd.UID] = tag_uid[tag_hd.UID].tolist()
        df[tag_hd.Tag] = tag_uid[tag_hd.Tag].tolist()
        print("outliers_rate:", outliers_rate)
        del regular
        del data
        return df
    except Exception:
        # Best-effort: log the failure and hand the caller the input
        # untouched instead of crashing on an unbound `df`.
        traceback.print_exc()
        return data


# 异常点检测
def extract_feature(data):
    """
    Per-UID statistical feature extraction.

    For every feature column, the following per-UID aggregates are merged
    onto the de-duplicated (UID, Tag) label frame: nunique, max, min, sum,
    count, mean, std, 25%/75% quantiles, idxmax, mad, var.  Each merge
    re-uses the raw feature column name, so pandas suffixes the duplicated
    names automatically.  The result is pickled to the module-level
    ``ftr_path`` and also returned.

    :param data: DataFrame with one row per event, holding the
        ``tag_hd.UID`` / ``tag_hd.Tag`` columns plus feature columns
        castable to float64.
    :return: the merged label/feature DataFrame.
    """
    col = list(data.columns)
    col.remove(tag_hd.Tag)
    col.remove(tag_hd.UID)
    label = data[tag_header].drop_duplicates()
    data = data.astype('float64')
    # The grouping is identical for every feature column, so build it once
    # instead of once per iteration (the original rebuilt it in the loop).
    gp_uid = data.groupby(['UID'])
    for i, feature in enumerate(col):
        print(i, feature)
        gp_uid_feature = gp_uid[feature]
        try:
            label = label.merge(gp_uid_feature.nunique().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.max().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.min().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.sum().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.count().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.mean().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.std().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.quantile(.25).reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.quantile(.75).reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.idxmax().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.mad().reset_index(), on='UID', how='left')
            label = label.merge(gp_uid_feature.var().reset_index(), on='UID', how='left')
        except Exception:
            # Skip a bad column but keep extracting the rest; the original
            # passed a stray `11` as print_exc's limit argument.
            traceback.print_exc()
    pd.to_pickle(label, ftr_path)
    return label


# tran_data = outliers_detection02(tst_merge[tst_merge[tag_hd.Tag] != -1])
# Split labelled training rows (Tag != -1) from unlabelled test rows,
# then stack them back with the training rows first.
labelled = tst_merge[tag_hd.Tag] != -1
tran_data = tst_merge[labelled]
test_data = tst_merge[~labelled]
print(tran_data.shape)
print(test_data.shape)
tst_merge = pd.concat([tran_data, test_data])
print(tst_merge.shape)
# Sanity check: distinct UID counts per split (test, train, combined).
for part in (test_data, tran_data, tst_merge):
    print(len(set(part[tag_hd.UID].tolist())))

extract_feature(tst_merge)

# value0 = [[row for row in ftr_list] for i in range(len(ftr_list[0]))]

# clos_time = [(30000, 36000), (36000, 42000), (42000, 48000), (48000, 54000), (54000, 60000)]
#
# parallel_map(extract_feature, clos_time, 5)
