from util import *



def sift_callgraph_size_one_process(min_size, max_size, file_name_path):
    """Load one call-graph CSV shard and keep only traces whose size is in range.

    A trace's "size" is its row count, i.e. how many rows share the same
    ``traceid``. Rows are kept when ``min_size <= size < max_size``.

    Parameters
    ----------
    min_size : int
        Inclusive lower bound on rows per trace.
    max_size : int
        Exclusive upper bound on rows per trace.
    file_name_path : str
        Path of the CSV shard to read; must contain a ``traceid`` column.

    Returns
    -------
    pandas.DataFrame
        The filtered rows, original relative order preserved.
    """
    print(f"start dealing {file_name_path}!")
    df_temp = pd.read_csv(file_name_path)

    # Per-row occurrence count of that row's traceid (row order preserved).
    # Rows with a NaN traceid are dropped by groupby, which is intended here.
    counts = df_temp.groupby('traceid')['traceid'].transform('size')

    # Keep rows whose trace size satisfies min_size <= count < max_size.
    mask = (counts >= min_size) & (counts < max_size)

    # .loc keeps the original relative order; copy() avoids chained-assignment
    # warnings on the frame handed back to the caller.
    return df_temp.loc[mask].copy()


import multiprocessing
import numpy as np
def sift_callgraph_size(min_size, max_size, specific):
    """Filter ten call-graph shards by trace size in parallel, merge, and save.

    Each of the ten shard files (``s0`` .. ``s9``) for the given split is
    filtered by :func:`sift_callgraph_size_one_process` in its own worker
    process. The surviving rows are concatenated, ordered so that whole
    traces stay contiguous — traces are sorted by their earliest timestamp,
    ties broken by a per-trace random key, rows within a trace by timestamp —
    and written to ``{dataset}_size{min_size}_{max_size}_{specific}.csv``.

    Parameters
    ----------
    min_size : int
        Inclusive lower bound on rows per trace.
    max_size : int
        Exclusive upper bound on rows per trace.
    specific : str
        Split name: ``"history"`` selects the 0d11_0d12 dataset, anything
        else the 0d12_0d13 dataset.
    """
    # The dataset prefix depends only on `specific`; resolve it once instead
    # of re-deriving it on every shard iteration.
    if specific == "history":
        dataset = "CallGraph_dataset_0d11_0d12"
    else:
        dataset = "CallGraph_dataset_0d12_0d13"

    para_list = []
    for trace_id in range(10):
        if specific == "history":
            aim_file_name_path = get_datas_dir() + f"/CallGraph_dataset_0d11_0d12_new_cleaned_0.1_s{trace_id}_{specific}.csv"
        else:
            aim_file_name_path = get_datas_dir() + f"/CallGraph_dataset_0d12_0d13_cleaned_0.1_s{trace_id}_{specific}.csv"
        para_list.append((min_size, max_size, aim_file_name_path))

    with multiprocessing.Pool(processes=10) as pool:
        # Submit all shards, then collect results in submission order.
        tasks = [pool.apply_async(sift_callgraph_size_one_process, para) for para in para_list]
        frames = [task.get() for task in tasks]

    # Concatenate once instead of pd.concat inside the loop — repeated
    # concat copies the accumulated frame each time (quadratic total cost).
    df_all = pd.concat(frames) if frames else pd.DataFrame()

    print("开始合并处理。。。")
    sorted_df = df_all.sort_values(by=['traceid', 'timestamp']).copy()
    # Earliest timestamp per trace, then order the traces by it.
    sorted_groups = sorted_df.groupby('traceid').agg(MinTimestamp=('timestamp', 'min')).reset_index()
    sorted_groups = sorted_groups.sort_values(by='MinTimestamp')

    # Attach MinTimestamp back onto every row of each trace.
    result = pd.merge(sorted_groups, sorted_df, on='traceid')
    # One random key per trace: shuffles traces that share a MinTimestamp
    # while keeping each trace's rows together.
    result['random_num'] = result['traceid'].map({traceid: np.random.rand() for traceid in result['traceid'].unique()})
    # Final order: trace start time, random tie-break, then row timestamp.
    df_all = result.sort_values(by=['MinTimestamp', 'random_num', 'timestamp'])
    df_all = df_all.drop(columns=["MinTimestamp", 'random_num'])
    df_all.to_csv(get_datas_dir() + f"/{dataset}_size{min_size}_{max_size}_{specific}.csv", index=False)

def main(specific):
    """Sweep all size buckets [0,10), [10,20), ... [40,50) for one split.

    Parameters
    ----------
    specific : str
        Split name forwarded to :func:`sift_callgraph_size`
        (e.g. ``"history"`` or ``"validate"``).
    """
    for lower_bound in range(0, 50, 10):
        sift_callgraph_size(lower_bound, lower_bound + 10, specific)
        
# Guard the entry point: multiprocessing's spawn start method re-imports this
# module in every worker, so unguarded module-level calls would re-launch the
# whole pipeline recursively from each child process.
if __name__ == "__main__":
    main("history")
    main("validate")
