# -*- coding: UTF-8 -*-

import pandas as pd
import numpy as np

# Console display tuning: widen pandas output so wide feature frames
# print without column truncation or line wrapping.
pd.set_option('display.width', 320)
pd.set_option('display.max_columns', 20)

# Name of the transaction-value column used throughout this module.
value_column_name = 'totalValue'


def get_histogram_feature(block_data, threshold, bins):
    """
    Turn a window of transactions into histogram features.

    Values below ``threshold`` are binned with ``np.histogram``; everything
    at or above the threshold is collapsed into a single overflow count
    appended after the bin counts.

    :param block_data: transaction records containing a ``totalValue`` column
    :param threshold: value cutoff separating binned values from the overflow count
    :param bins: bin specification forwarded to ``np.histogram``
    :return: 1-D array of bin counts followed by the overflow count
    """
    values = block_data[value_column_name]
    in_range = values.loc[values < threshold]
    counts, _ = np.histogram(in_range, bins=bins)
    overflow = len(values.loc[values >= threshold])
    return np.append(counts, overflow)


def get_block_feature(block_data, start_time, end_time, step_minute, return_dict=None, process_num=0,
                      histogram_threshold=100, bins=10, minute_module=True,
                      extend_hist_config=({'bucket_from': 40, 'bucket_to': 60, 'bucket_bins': 5},
                                          {'bucket_from': 0, 'bucket_to': 1, 'bucket_bins': 5},
                                          {'bucket_from': 1000, 'bucket_to': 10000, 'bucket_bins': 1}),
                      use_extend_hist_config=True):
    """
    Compute sliding-window histogram features over network-wide block data.

    For each emitted timestamp the features are: transaction count, total
    value, the ``get_histogram_feature`` bins (``bins`` counts plus one
    overflow column), and optionally one extra histogram per entry of
    ``extend_hist_config``.

    :param block_data: transaction records with ``timestamp`` (unix seconds)
        and ``totalValue`` columns
    :param start_time: window end of the first row (unix seconds)
    :param end_time: exclusive upper bound for window ends (unix seconds)
    :param step_minute: look-back window length in minutes
    :param return_dict: optional dict for multiprocessing result passing
    :param process_num: worker slot key used with ``return_dict``
    :param histogram_threshold: value cutoff for the base histogram
    :param bins: number of bins for the base histogram
    :param minute_module: True emits one (overlapping) row per minute;
        False advances by a full step per row
    :param extend_hist_config: extra bucket specs, each with ``bucket_from``,
        ``bucket_to`` and ``bucket_bins``
    :param use_extend_hist_config: whether to append the extra bucket features
    :return: feature DataFrame (also stored in ``return_dict`` when given);
        None when ``start_time > end_time`` (kept for backward compatibility)
    """
    if start_time > end_time:
        return
    curr_time = start_time
    block_feature = []
    last_sum = -1
    last_count = -1

    while curr_time < end_time:
        # All transactions inside the look-back window (curr_time - step, curr_time].
        window = block_data.loc[
            (block_data['timestamp'] > (curr_time - step_minute * 60)) & (block_data['timestamp'] <= curr_time)]
        curr_sum = window[value_column_name].sum()
        curr_count = len(window)
        # Adjacent windows sharing both sum and row count almost certainly hold
        # the same transactions, so reuse the previous feature row instead of
        # recomputing the histograms.  (Sum alone was too weak a fingerprint:
        # distinct windows can coincidentally share a sum.)
        if curr_sum == last_sum and curr_count == last_count:
            curr_res = block_feature[-1][:]
            curr_res[0] = pd.to_datetime(curr_time, unit='s')
        else:
            curr_res = [pd.to_datetime(curr_time, unit='s'), curr_count, curr_sum]
            curr_res.extend(get_histogram_feature(window, histogram_threshold, bins))

            if use_extend_hist_config:
                for c in extend_hist_config:
                    values = window[value_column_name]
                    extend_hist, _ = np.histogram(
                        values.loc[(values >= c['bucket_from']) & (values <= c['bucket_to'])],
                        bins=c['bucket_bins'])
                    curr_res.extend(extend_hist)

        block_feature.append(curr_res)
        last_sum = curr_sum
        last_count = curr_count
        print(pd.to_datetime(curr_time, unit='s'))
        # Advance one minute (overlapping windows) or one full step.
        curr_time += 60 if minute_module else step_minute * 60

    columns_name = ['time', 'block_total_tx_count', 'block_total_value']
    # bins counts plus one overflow column produced by get_histogram_feature.
    columns_name.extend('block-hist-' + str(e) for e in range(1, bins + 2))

    if use_extend_hist_config:
        for c in extend_hist_config:
            columns_name.extend('block-extend-hist-%s-%s-%s' % (c['bucket_from'], c['bucket_to'], i)
                                for i in range(1, c['bucket_bins'] + 1))

    block_feature_df = pd.DataFrame(block_feature, columns=columns_name)
    block_feature_df.fillna(0, inplace=True)
    if return_dict is not None:
        # Multiprocessing path: publish the result under this worker's slot.
        return_dict[process_num] = block_feature_df
    return block_feature_df


if __name__ == '__main__':
    # Load the raw network-wide transaction records.
    block_info = pd.read_csv('../data/total/total.csv')
    # Derive a unix-epoch (seconds) timestamp column from the formatted time field.
    parsed_times = pd.to_datetime(block_info['time'], format="%Y%m%d%H%M%S")
    block_info['timestamp'] = parsed_times.values.astype(np.int64) // 10 ** 9
    # Reference points: 20180601030000 ~ 1527822000, 20181101001000 ~ 1541031000.
    start_timestamp = 1527811200  # 2018-06-01 00:00:00 UTC
    end_timestamp = 1527822000  # 2018-06-01 03:00:00 UTC (a three-hour span)
    feature_df = get_block_feature(block_info, start_time=start_timestamp, end_time=end_timestamp, step_minute=10)
    print(feature_df)
