import cudf
import cupy as cp
from datetime import datetime, timedelta
import os
import time


def derive_tmp_feature(df):
    """Derive per-tick order-book features from 10-level bid/ask snapshot columns.

    Returns a cudf DataFrame keyed by ('date_time', 'symbol') carrying WAPs,
    spreads, volume imbalance and per-level log returns.
    """
    def _levels(prefix):
        # Stack the 10 book levels of `prefix` into an (n_rows, 10) device matrix.
        return df[[f'{prefix}{k}' for k in range(10)]].values

    bid_px = _levels('bid_price')
    bid_vol = _levels('bid_volume')
    ask_px = _levels('ask_price')
    ask_vol = _levels('ask_volume')

    # Weighted average price per level; zero total volume would divide by
    # zero, so those denominators are masked to NaN first.
    vol_sum = bid_vol + ask_vol
    wap = (bid_px * bid_vol + ask_px * ask_vol) / cp.where(vol_sum == 0, cp.nan, vol_sum)
    # Gap between WAP columns 1 and 2, taken BEFORE the zero-price masking
    # below (same ordering as the original implementation).
    wap_balance = cp.abs(wap[:, 1] - wap[:, 2])

    # Relative level-1 spread, guarded against a zero mid-price.
    mid_price = (ask_px[:, 0] + bid_px[:, 0]) / 2
    price_spread = (ask_px[:, 0] - bid_px[:, 0]) / cp.where(mid_price == 0, cp.nan, mid_price)

    bid_spread = bid_px[:, 0] - bid_px[:, 1]
    ask_spread = ask_px[:, 0] - ask_px[:, 1]

    total_bid_vol = cp.sum(bid_vol, axis=1)
    total_ask_vol = cp.sum(ask_vol, axis=1)
    # NOTE(review): despite its name this column is bid+ask volume; the name is
    # kept because the module-level `features` spec references it.
    total_ask_volume = total_bid_vol + total_ask_vol
    volume_imbalance = cp.abs(total_bid_vol - total_ask_vol)

    # Mask zero prices to NaN so the log() calls below cannot yield -inf.
    wap = cp.where(wap == 0, cp.nan, wap)
    ask_px = cp.where(ask_px == 0, cp.nan, ask_px)
    bid_px = cp.where(bid_px == 0, cp.nan, bid_px)

    # Row-over-row log returns; prepend NaN so the output keeps n rows.
    log_return_wap = cp.diff(cp.log(wap), axis=0, prepend=cp.nan)
    log_return_ask = cp.diff(cp.log(ask_px), axis=0, prepend=cp.nan)
    log_return_bid = cp.diff(cp.log(bid_px), axis=0, prepend=cp.nan)

    tmp_feature = cudf.DataFrame({
        'date_time': df['date_time'],
        'symbol': df['symbol']
    })
    # Interleaved per-level columns (same column order as before).
    for k in range(10):
        tmp_feature[f'wap{k}'] = wap[:, k]
        tmp_feature[f'log_return_wap{k}'] = log_return_wap[:, k]
        tmp_feature[f'log_return_ask{k}'] = log_return_ask[:, k]
        tmp_feature[f'log_return_bid{k}'] = log_return_bid[:, k]

    tmp_feature['wap_balance'] = wap_balance
    tmp_feature['price_spread'] = price_spread
    tmp_feature['bid_spread'] = bid_spread
    tmp_feature['ask_spread'] = ask_spread
    tmp_feature['total_ask_volume'] = total_ask_volume
    tmp_feature['volume_imbalance'] = volume_imbalance

    return tmp_feature

def realized_volatility(series):
    """Realized volatility: sqrt of the NaN-ignoring sum of squared values.

    Accepts a cudf Series (its `.values` device array is used) or any object
    that cupy can wrap via `asarray`.
    """
    data = series.values if hasattr(series, 'values') else cp.asarray(series)
    return cp.sqrt(cp.nansum(cp.square(data)))


# Aggregation spec passed to cudf's groupby().agg(): column name -> reductions.
# Insertion order is significant downstream (it fixes the aggregated column
# order fed to flatten_mult_index), so columns are added in the same order
# as the original literal.
features = {
    'symbol': ['first'],
    'date_time': ['count'],
}
for _scalar_col in ('wap_balance', 'price_spread', 'bid_spread',
                    'ask_spread', 'total_ask_volume', 'volume_imbalance'):
    features[_scalar_col] = ['sum', 'mean', 'std']
for _prefix in ('wap', 'log_return_wap', 'log_return_ask', 'log_return_bid'):
    for _lvl in range(10):
        features[f'{_prefix}{_lvl}'] = ['sum', 'mean', 'std']

def flatten_mult_index(multIndex, postfix=None):
    """Flatten a two-level column MultiIndex into underscore-joined names.

    :param multIndex: iterable of (column, aggregation) tuples
    :param postfix: optional extra suffix appended to non-key columns
    :return: list of flat column names; the grouping keys 'symbol' and
             'time_group' keep their bare name, every other entry becomes
             'column_agg' (plus '_postfix' when given)
    """
    extra = [] if postfix is None else [postfix]
    return [
        entry[0] if entry[0] in ('symbol', 'time_group')
        else '_'.join(list(entry) + extra)
        for entry in multIndex
    ]

def derive_agg_feature(snapshot, interval_minutes):
    """Aggregate tick-level features into fixed-width time buckets.

    :param snapshot: cudf DataFrame produced by derive_tmp_feature; must
                     contain 'date_time' plus every column in `features`
    :param interval_minutes: bucket width in minutes
    :return: cudf DataFrame with one row per (bucket, symbol); 'time_group'
             is renamed back to 'date_time' and rows containing NaN dropped
    """
    NS_PER_SECOND = 1_000_000_000

    # Bucket start times, computed entirely in int64.
    # Fix: the original converted via float (// 1e9, * 1e9); float64's 53-bit
    # mantissa cannot exactly represent ns-scale epoch values, which can shift
    # a timestamp across a bucket boundary. Integer arithmetic is exact.
    timestamps = snapshot['date_time'].astype('int64') // NS_PER_SECOND
    interval_seconds = interval_minutes * 60
    base_timestamps = (timestamps // interval_seconds) * interval_seconds
    snapshot['time_group'] = cudf.to_datetime(base_timestamps * NS_PER_SECOND)

    snapshot_feature = snapshot.groupby('time_group').agg(features).reset_index()
    snapshot_feature.columns = flatten_mult_index(snapshot_feature.columns)

    # Re-aggregate over the tail of each bucket: keep only ticks at least
    # `time` seconds past the bucket start, suffix the resulting columns with
    # the offset, and left-join them onto the full-bucket aggregates.
    for time in [450, 300, 150]:
        d = snapshot[snapshot['date_time'] >= snapshot['time_group'] + timedelta(seconds=time)]
        d = d.groupby('time_group').agg(features).reset_index()
        d.columns = flatten_mult_index(d.columns, str(time))
        snapshot_feature = snapshot_feature.merge(d, on=['time_group', 'symbol'], how='left')

    return snapshot_feature.rename(columns={'time_group': 'date_time'}).dropna()

def cal_feature(base_path, output_path, filename):
    """Read one parquet tick file, derive and aggregate features, write result.

    Output is written to `output_path/agg_feature_<filename>` as snappy
    parquet via pandas.
    """
    # Run the whole pipeline on a dedicated CUDA stream.
    with cp.cuda.Stream():
        src = os.path.join(base_path, filename)
        # Fix: read the file once. The original read it twice — an extra full
        # read just to enumerate its own columns, then passed them all back in.
        df = cudf.read_parquet(src)
        df['date_time'] = cudf.to_datetime(df['date_time'])

        # Chunk very large frames to bound peak GPU memory.
        # NOTE(review): log returns are computed per chunk, so the first row of
        # each chunk after the first loses its cross-chunk diff (becomes NaN) —
        # confirm boundary rows are acceptable losses before relying on them.
        chunk_size = 1_000_000
        if len(df) > chunk_size:
            tmp_feature = cudf.concat(
                [derive_tmp_feature(df[i:i + chunk_size])
                 for i in range(0, len(df), chunk_size)]
            )
        else:
            tmp_feature = derive_tmp_feature(df)

        # Aggregate into 10-minute buckets, then release the tick-level frame.
        agg_feature = derive_agg_feature(tmp_feature, 10)
        del tmp_feature

        # Fix: include the source filename in the output name; the original
        # wrote a constant name, so every input overwrote the same file.
        agg_pd = agg_feature.to_pandas()
        agg_pd.to_parquet(os.path.join(output_path, f'agg_feature_{filename}'),
                          index=False, compression='snappy')
        del agg_feature, agg_pd

        # Return pooled GPU memory to the driver.
        cp.get_default_memory_pool().free_all_blocks()

# 单独计算波动率特征
def add_realized_volatility(df, group_col='time_group'):
    """Compute realized volatility per group, separately from the main aggregation.

    For every log-return column (log_return_{wap,ask,bid}{0..9}), adds a
    '<col>_realized_volatility' column holding sqrt(nansum(x^2)) per
    `group_col` group.

    NOTE(review): `df.groupby(...).apply(...)` yields one value per group,
    but it is assigned into `result`, which keeps df's per-row index — the
    assignment likely aligns by index and leaves most rows NaN. Confirm the
    intended output shape (per-row broadcast vs. per-group frame).
    """
    result = df.copy()

    # Add a realized-volatility feature for each log-return column.
    for i in range(10):
        for col_prefix in ['log_return_wap', 'log_return_ask', 'log_return_bid']:
            col = f'{col_prefix}{i}'
            result[f'{col}_realized_volatility'] = df.groupby(group_col)[col].apply(
                lambda x: cp.sqrt(cp.nansum(cp.square(x.values)))
            )

    return result

if __name__ == '__main__':
    start = datetime.now()

    # GPU memory configuration via environment variables.
    # NOTE(review): these are set AFTER cupy/cudf are imported, so they may
    # have no effect — confirm they are honored or move them before import.
    os.environ['CUDA_MEMORY_POOL_TYPE'] = 'Segmented'
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'

    base_path = os.path.join(os.path.expanduser('~'), 'Data/tick10_16_partition_by_symbol')
    output_path = os.path.join(os.path.expanduser('~'), 'Data/tick10_16_partition_by_symbol_feature')

    # Make sure the output directory exists.
    os.makedirs(output_path, exist_ok=True)

    # Processing queue: every parquet file, in listing order.
    file_queue = [f for f in os.listdir(base_path) if f.endswith('.parquet')]

    # Single-GPU sequential processing.
    for idx, filename in enumerate(file_queue):
        try:
            # Fix: interpolate the actual filename — the original printed a
            # mangled literal placeholder instead of the file being processed.
            print(f'处理第 {idx+1}/{len(file_queue)} 个文件: {filename}')

            # Periodically release pooled device and pinned-host memory.
            if idx % 10 == 0:
                cp.get_default_memory_pool().free_all_blocks()
                cp.get_default_pinned_memory_pool().free_all_blocks()

            cal_feature(base_path, output_path, filename)
        except Exception as e:
            # Log and keep going; one bad file must not abort the batch.
            print(f'处理文件 {filename} 时出错: {str(e)}')
            continue

    print(f'总耗时: {datetime.now() - start}')
# Total elapsed time (sample run): 0:00:29.816545