from util import *
import pickle
import sys
sys.path.append(f"{get_root_dir()}/MS_system")
from Request import *

def sift_dynamic_one_process(min_ratio, max_ratio, file_name_path):
    """Filter one trace-shard CSV by trace-size ratio.

    For every ``traceid`` group the ratio is
    (rows in the trace) / (historical max task count of the trace's service);
    only rows belonging to traces whose ratio lies in the half-open
    interval [min_ratio, max_ratio) are kept, in their original order.

    Parameters
    ----------
    min_ratio, max_ratio : float
        Lower (inclusive) and upper (exclusive) ratio bounds.
    file_name_path : str
        Path of the shard CSV to filter.

    Returns
    -------
    pandas.DataFrame
        A copy of the surviving rows.
    """
    frame = pd.read_csv(file_name_path)

    # Historical snapshot: maps service name -> call-graph object whose
    # `all_task_num` is the largest observed task count for that service.
    history_base_file = get_root_dir() + "/data_process/dealed_data/dataset_0d11_0d12.base"
    with open(history_base_file, "rb") as fh:
        history_graphs = pickle.load(fh)
    cap_by_service = {
        svc: graph.all_task_num for svc, graph in history_graphs.items()
    }

    grouped = frame.groupby("traceid")
    # Trace size, broadcast back onto every row of the trace.
    trace_rows = grouped["traceid"].transform("size")
    # Service of each trace (constant within a group), broadcast per row.
    row_service = grouped["service"].transform("first")
    # Historical cap per row; NaN when the service has no history entry.
    cap = row_service.map(cap_by_service)

    ratio = trace_rows.div(cap)
    # Require a known, positive cap (avoids NaN/zero-division artefacts),
    # then apply the half-open interval [min_ratio, max_ratio).
    keep = cap.notna() & (cap > 0) & (ratio >= min_ratio) & (ratio < max_ratio)

    # Boolean .loc preserves the original relative row order.
    return frame.loc[keep].copy()



import multiprocessing
import numpy as np
def sift_dynamic(min_size, max_size, specific):
    """Filter all 10 trace shards for one ratio band and write the merged,
    chronologically ordered result to a single CSV.

    Parameters
    ----------
    min_size, max_size : float
        Half-open ratio interval [min_size, max_size) forwarded to
        ``sift_dynamic_one_process``.
    specific : str
        ``"history"`` selects the 0d11_0d12 dataset files; any other
        value selects the 0d12_0d13 dataset files.
    """
    # Dataset name and file stem depend only on `specific` — decide once,
    # outside the loop (the original bound `dataset` inside the loop and
    # relied on loop-variable leakage).
    if specific == "history":
        dataset = "CallGraph_dataset_0d11_0d12"
        stem = dataset + "_new_cleaned_0.1"
    else:
        dataset = "CallGraph_dataset_0d12_0d13"
        stem = dataset + "_cleaned_0.1"

    para_list = [
        (min_size, max_size, get_datas_dir() + f"/{stem}_s{trace_id}_{specific}.csv")
        for trace_id in range(10)
    ]

    # starmap preserves submission order, matching the original
    # apply_async + ordered .get() pattern.
    with multiprocessing.Pool(processes=10) as pool:
        frames = pool.starmap(sift_dynamic_one_process, para_list)

    # Single concat instead of quadratic incremental concatenation.
    df_all = pd.concat(frames) if frames else pd.DataFrame()

    print("开始合并处理。。。")
    sorted_df = df_all.sort_values(by=['traceid', 'timestamp']).copy()

    # Earliest timestamp of each trace; traces are emitted in chronological
    # order of their first event.
    sorted_groups = (
        sorted_df.groupby('traceid')
        .agg(MinTimestamp=('timestamp', 'min'))
        .reset_index()
        .sort_values(by='MinTimestamp')
    )

    # Attach MinTimestamp back onto every row of the trace.
    result = pd.merge(sorted_groups, sorted_df, on='traceid')

    # Random per-trace tiebreaker: traces that share a MinTimestamp are
    # shuffled relative to each other but each trace stays contiguous.
    rand_by_trace = {tid: np.random.rand() for tid in result['traceid'].unique()}
    result['random_num'] = result['traceid'].map(rand_by_trace)

    # Final order: first event time, random tiebreak, then within-trace time.
    df_all = result.sort_values(by=['MinTimestamp', 'random_num', 'timestamp'])
    df_all = df_all.drop(columns=["MinTimestamp", 'random_num'])
    df_all.to_csv(
        get_datas_dir()
        + f"/{dataset}_dynamic{round(min_size,1)}_{round(max_size,1)}_{specific}.csv",
        index=False,
    )

def main(specific):
    """Run the dynamic sift over the five ratio bands [0.0, 0.2) … [0.8, 1.0)."""
    for lower in (step / 5 for step in range(5)):
        sift_dynamic(lower, lower + 0.2, specific)
        
if __name__ == "__main__":
    # Guard is required: this script uses multiprocessing.Pool, and under
    # the spawn start method (default on Windows/macOS) each worker
    # re-imports this module — without the guard main() would re-run in
    # every worker, spawning processes recursively.
    main("history")
    main("validate")
