from datetime import datetime, timedelta
import os
import time
import pandas as pd
import numpy as np
from joblib import Parallel, delayed


def derive_tmp_feature(df):
    """Derive tick-level order-book features from a snapshot frame.

    Expects ``df`` to contain columns ``bid_price0..9``, ``bid_volume0..9``,
    ``ask_price0..9``, ``ask_volume0..9`` plus ``date_time`` and ``symbol``,
    with rows in time order (log-returns are computed row-over-row).

    :param df: pd.DataFrame of order-book snapshots
    :return: pd.DataFrame of per-row features aligned with ``df``'s rows
    """
    bid_price_matrix = df[[f'bid_price{i}' for i in range(10)]].to_numpy()
    bid_volume_matrix = df[[f'bid_volume{i}' for i in range(10)]].to_numpy()
    ask_price_matrix = df[[f'ask_price{i}' for i in range(10)]].to_numpy()
    ask_volume_matrix = df[[f'ask_volume{i}' for i in range(10)]].to_numpy()

    # Volume-weighted average price per level; a zero total volume at a level
    # would divide by zero, so those denominators are mapped to NaN instead.
    wap_denominator = (bid_volume_matrix + ask_volume_matrix)
    wap = (bid_price_matrix * bid_volume_matrix + ask_price_matrix * ask_volume_matrix) / np.where(wap_denominator == 0, np.nan, wap_denominator)

    # Bug fix: levels are 0-indexed throughout this function (price_spread,
    # bid_spread and ask_spread all use columns 0/1), so the balance between
    # the two best levels is |wap0 - wap1|, not |wap1 - wap2|.
    wap_balance = np.abs(wap[:, 0] - wap[:, 1])

    # Relative bid/ask spread at the best level, normalized by the mid price
    # (NaN when the mid price is zero).
    price_spread_denominator = ((ask_price_matrix[:, 0] + bid_price_matrix[:, 0]) / 2)
    price_spread = (ask_price_matrix[:, 0] - bid_price_matrix[:, 0]) / np.where(price_spread_denominator == 0, np.nan, price_spread_denominator)

    bid_spread = bid_price_matrix[:, 0] - bid_price_matrix[:, 1]
    ask_spread = ask_price_matrix[:, 0] - ask_price_matrix[:, 1]

    bid_volume_matrix_sum = np.sum(bid_volume_matrix, axis=1)
    ask_volume_matrix_sum = np.sum(ask_volume_matrix, axis=1)
    # NOTE(review): despite the name, this is the total bid+ask volume over
    # all 10 levels; the name is kept because the downstream `features`
    # aggregation spec keys on the 'total_ask_volume' column.
    total_ask_volume = bid_volume_matrix_sum + ask_volume_matrix_sum
    volume_imbalance = np.abs(bid_volume_matrix_sum - ask_volume_matrix_sum)

    # Zero prices/WAPs would make np.log blow up; mask them to NaN first.
    # (These are local numpy copies from .to_numpy(), so df is not mutated.)
    wap[wap == 0] = np.nan
    ask_price_matrix[ask_price_matrix == 0] = np.nan
    bid_price_matrix[bid_price_matrix == 0] = np.nan

    # Row-over-row log returns; prepend=NaN keeps output aligned with df
    # (first row has no previous tick, hence NaN).
    log_return_wap = np.diff(np.log(wap), axis=0, prepend=np.nan)
    log_return_ask = np.diff(np.log(ask_price_matrix), axis=0, prepend=np.nan)
    log_return_bid = np.diff(np.log(bid_price_matrix), axis=0, prepend=np.nan)

    tmp_feature = {
        'date_time': df['date_time'],
        'symbol': df['symbol']
    }
    for i in range(10):
        tmp_feature[f'wap{i}'] = wap[:, i]
        tmp_feature[f'log_return_wap{i}'] = log_return_wap[:, i]
        tmp_feature[f'log_return_ask{i}'] = log_return_ask[:, i]
        tmp_feature[f'log_return_bid{i}'] = log_return_bid[:, i]

    tmp_feature['wap_balance'] = wap_balance
    tmp_feature['price_spread'] = price_spread
    tmp_feature['bid_spread'] = bid_spread
    tmp_feature['ask_spread'] = ask_spread
    tmp_feature['total_ask_volume'] = total_ask_volume
    tmp_feature['volume_imbalance'] = volume_imbalance

    return pd.DataFrame(tmp_feature)

def realized_volatility(series):
    """Realized volatility: square root of the sum of squared (log) returns.

    :param series: pd.Series of log returns
    :return: realized volatility as a scalar
    """
    squared_returns = series ** 2
    return np.sqrt(squared_returns.sum())


# Aggregation spec for DataFrame.groupby(...).agg(...): maps each tick-level
# column to the statistics computed per time bucket. Key order is preserved
# deliberately — it determines the order of the aggregated output columns.
features = {
    'symbol': ['first'],
    'date_time': ['count'],
    # spread / volume features: sum, mean, std
    **{col: ['sum', 'mean', 'std']
       for col in ('wap_balance', 'price_spread', 'bid_spread', 'ask_spread',
                   'total_ask_volume', 'volume_imbalance')},
    # per-level WAPs: same trio
    **{f'wap{i}': ['sum', 'mean', 'std'] for i in range(10)},
    # log-return columns additionally get realized volatility
    **{f'{prefix}{i}': ['sum', 'mean', 'std', realized_volatility]
       for prefix in ('log_return_wap', 'log_return_ask', 'log_return_bid')
       for i in range(10)},
}

def flatten_mult_index(multIndex, postfix=None):
    """Flatten multi-level aggregation columns into single-level names.

    Key columns (``symbol`` / ``time_group``) keep only their first level;
    every other column becomes ``<col>_<agg>``, plus ``_<postfix>`` when a
    postfix is given.

    :param multIndex: iterable of column tuples (e.g. a pandas MultiIndex)
    :param postfix: optional suffix appended to non-key column names
    :return: list of flattened column names
    """
    extra = [] if postfix is None else [postfix]
    flattened = []
    for levels in multIndex:
        head = levels[0]
        if head in ('symbol', 'time_group'):
            flattened.append(head)
        else:
            flattened.append('_'.join([*levels, *extra]))
    return flattened

def derive_agg_feature(snapshot, interval):
    """Aggregate tick-level features into fixed time buckets.

    Adds a ``time_group`` column to ``snapshot`` (the input frame is mutated),
    aggregates each full bucket, then re-aggregates the tail of each bucket
    (rows at least 450/300/150 seconds past the bucket start) and left-merges
    those as columns suffixed with the offset.

    :param snapshot: pd.DataFrame of tick-level features
        (output of ``derive_tmp_feature``) with a datetime ``date_time`` column
    :param interval: bucket width as a pandas offset alias, e.g. ``'10min'``
    :return: pd.DataFrame with one row per bucket; rows containing any NaN
        (e.g. std over a single tick, or an empty tail window) are dropped
    """
    snapshot['time_group'] = snapshot['date_time'].dt.floor(interval)
    snapshot_feature = snapshot.groupby('time_group').agg(features).reset_index()
    snapshot_feature.columns = flatten_mult_index(snapshot_feature.columns)
    # Bug fix: the loop variable was named `time`, shadowing the stdlib
    # `time` module imported at the top of the file.
    for offset_seconds in [450, 300, 150]:
        tail = snapshot[snapshot['date_time'] >= snapshot['time_group'] + timedelta(seconds=offset_seconds)]
        d = tail.groupby('time_group').agg(features).reset_index(drop=False)
        d.columns = flatten_mult_index(d.columns, str(offset_seconds))
        snapshot_feature = pd.merge(snapshot_feature, d, on=['time_group', 'symbol'], how='left')
    snapshot_feature.rename(columns={'time_group': 'date_time'}, inplace=True)
    snapshot_feature.dropna(inplace=True)
    return snapshot_feature

def cal_feature(base_path, output_path, filename):
    """Compute and persist aggregated features for one per-symbol parquet file.

    Reads ``<base_path>/<filename>``, derives tick-level features, aggregates
    them into 10-minute buckets, and writes the result to
    ``<output_path>/agg_feature_<filename>``.

    :param base_path: directory containing the input parquet files
    :param output_path: directory the feature parquet is written to
    :param filename: name of the parquet file to process
    """
    start = datetime.now()
    print(f'processing {filename}')
    df = pd.read_parquet(os.path.join(base_path, filename))
    print(f'{filename} loaded, cost: {datetime.now() - start}')
    df['date_time'] = pd.to_datetime(df['date_time'])
    tmp_feature = derive_tmp_feature(df)
    print(f'{filename} tmp feature done, cost: {datetime.now() - start}')
    agg_feature = derive_agg_feature(tmp_feature, '10min')
    print(f'{filename} agg feature done, cost: {datetime.now() - start}, now: {datetime.now()}')
    # Bug fix: the output name previously did not vary with the input file,
    # so every parallel worker overwrote the same output file.
    agg_feature.to_parquet(os.path.join(output_path, f'agg_feature_{filename}'), index=False, compression='snappy')

if __name__ == '__main__':
    start = datetime.now()
    n_jobs = 8
    base_path = os.path.join(os.path.expanduser('~'), 'Data/tick10_16_partition_by_symbol')
    output_path = os.path.join(os.path.expanduser('~'), 'Data/tick10_16_partition_by_symbol_feature')

    # Bug fix: make sure the output directory exists before workers start
    # writing; otherwise every worker's to_parquet call fails.
    os.makedirs(output_path, exist_ok=True)

    # One parallel task per per-symbol parquet file.
    Parallel(n_jobs=n_jobs)(
        delayed(cal_feature)(base_path, output_path, f)
        for f in os.listdir(base_path)
        if f.endswith('.parquet')
    )
    print(f'total cost: {datetime.now() - start}')
# total cost: 0:03:09.585960