##
from util import *
import pandas as pd
import numpy as np
import multiprocessing
from tqdm import tqdm



# def get_unique_value():
#     path = "F:/data/data/"
#     dir = "CallGraph"
#     data = pd.read_csv(path+dir+"/CallGraph_20.csv", error_bad_lines=False)
#     unique_value=data["rpctype"].unique()
#     print(unique_value)


def sift_and_clean_data(df_temp, percent):
    """Filter and clean a call-graph DataFrame.

    Drops unused columns, randomly keeps ``percent`` of the traceid
    groups, removes any group whose caller (um) or callee (dm) contains
    'UNKNOWN', and finally validates each remaining group with
    :func:`process_group`.

    Args:
        df_temp: raw call-graph DataFrame; must contain at least the
            columns traceid / um / dm / rpc_id / rpctype plus the three
            instance/interface columns dropped below.
        percent: fraction (0, 1] of traceid groups to keep; a value that
            rounds to 1 (4 decimals) disables sampling.

    Returns:
        The cleaned (possibly empty) DataFrame.
    """
    # Columns never used downstream.
    drop_column_names = ["uminstanceid", "interface", "dminstanceid"]
    df_temp = df_temp.drop(columns=drop_column_names)

    # Randomly sample a fraction of the traceid groups.
    group_keys = df_temp.groupby('traceid').groups.keys()
    if round(percent, 4) != 1:
        selected_groups = np.random.choice(
            list(group_keys), size=int(len(group_keys) * percent), replace=False)
        df_temp = df_temp[df_temp['traceid'].isin(selected_groups)]

    # Drop whole groups that contain an UNKNOWN caller or callee.
    for col in ('um', 'dm'):
        unknown_traces = df_temp[df_temp[col].str.contains('UNKNOWN', na=False)]['traceid'].unique()
        df_temp = df_temp[~df_temp['traceid'].isin(unknown_traces)]

    # NOTE: this is the total group count BEFORE sampling, not the
    # number of sampled groups (the old label said "selected_groups").
    print(f"total groups length:{len(list(group_keys))}")

    # Per-group validation: drops groups with nulls, no USER/http root
    # row, or fewer than two rows (see process_group).
    df_temp = df_temp.groupby('traceid', group_keys=False).apply(process_group)

    return df_temp


# 定义处理每个组的逻辑
# Per-group validation used by groupby().apply() in sift_and_clean_data.
def process_group(group):
    """Validate one traceid group.

    Returns the group unchanged when it is valid, otherwise an empty
    slice of it (so groupby().apply() discards the group).

    A group is kept only when:
      * it contains no null cells and no literal "None" strings,
      * it has a root row with um='USER', rpc_id='0', rpctype='http',
      * it has at least two rows.
    """
    discarded = group[0:0]

    # Reject groups with missing values or the string "None".
    has_nulls = group.isnull().values.any()
    has_none_strings = (group == "None").any().any()
    if has_nulls or has_none_strings:
        return discarded

    # The trace must start at a user-facing HTTP entry point.
    is_root = (
        (group['um'] == 'USER')
        & (group['rpc_id'].isin(['0']))
        & (group['rpctype'] == 'http')
    )
    if group[is_root].empty:
        return discarded

    # A single-row trace carries no call structure worth keeping.
    if group.shape[0] < 2:
        return discarded

    return group


def clean_and_sample_one_process(percent, top_n_file_path, tar_gz_file_path, service_name):
    """Worker: load one tar.gz call-graph file, restrict it to the
    top-N services (or a single named service), then clean and sample it.

    Args:
        percent: sampling fraction forwarded to sift_and_clean_data.
        top_n_file_path: CSV with either a 'service' column (select by
            service) or a 'dm' column (select by microservice).
        tar_gz_file_path: archive to process.
        service_name: when not None, restrict to this single service
            (only honored for the 'service'-column case).

    Returns:
        The cleaned DataFrame for this archive.
    """
    print(f"start deal {tar_gz_file_path}")
    top_pd = pd.read_csv(top_n_file_path)
    df_temp = read_csv_from_tar_gz(tar_gz_file_path)

    if "service" in top_pd.columns:
        # Select rows by service name.  (was: service_name==None)
        if service_name is None:
            top_n_service_list = top_pd["service"].tolist()
        else:
            top_n_service_list = [service_name]
        df_temp = df_temp[df_temp["service"].isin(top_n_service_list)]
    else:
        # Select by microservice: keep only traces whose callees (dm)
        # all belong to the top-N microservice set.
        top_n_microservice_set = set(top_pd["dm"])
        df_temp = df_temp.groupby('traceid').filter(
            lambda g: set(g['dm']).issubset(top_n_microservice_set))

    df_temp = sift_and_clean_data(df_temp, percent)
    print(f"end deal {tar_gz_file_path}")
    return df_temp


def clean_and_sample_dataset_multi_processing(seed, percent, dataset_path, out_file_path, top_n_file_path, file_number, parallel_num, high_frequency_N=0, service_name=None):
    """Clean/sample every CallGraph archive in parallel and write one CSV.

    Args:
        seed: numpy random seed (reproducible sampling/ordering).
        percent: per-archive sampling fraction.
        dataset_path: dataset root containing a 'CallGraph' directory.
        out_file_path: output CSV; skipped if it already exists.
        top_n_file_path: top-N service list forwarded to the workers.
        file_number: number of archives to process (<=0 means all).
        parallel_num: worker process count.
        high_frequency_N: keep only the N most frequent services
            (0 keeps all).
        service_name: optional single-service filter for the workers.
    """
    np.random.seed(seed)

    # Skip the whole run when the output already exists.
    if os.path.exists(out_file_path):
        print(f"文件存在 ({out_file_path})")
        return

    # Note: 'sub_dir' avoids shadowing the builtin dir().
    sub_dir = "CallGraph"
    call_graph_dir = os.path.join(dataset_path, sub_dir)

    df_all = pd.DataFrame()
    tar_file_name = list_tar_files(call_graph_dir)
    print(f"需要处理的文件：{tar_file_name}")
    if file_number <= 0 or file_number > len(tar_file_name):
        file_number = len(tar_file_name)
    para_list = [
        (percent, top_n_file_path, os.path.join(call_graph_dir, file), service_name)
        for file in tar_file_name[:file_number]
    ]

    with multiprocessing.Pool(processes=parallel_num) as pool:
        # Fan out one task per archive and gather results in order.
        tasks = [pool.apply_async(clean_and_sample_one_process, para) for para in para_list]
        frames = [task.get() for task in tasks]
    if frames:
        df_all = pd.concat(frames)

    print("开始综合处理。。。")
    # Count, per trace, which services appear, then rank services by
    # how many traces they occur in.
    service_to_times = df_all.groupby("traceid")["service"].unique().explode().value_counts()
    if len(service_to_times) < high_frequency_N:
        print(f"clean 后service数量不够 {high_frequency_N}, 需要增加前面top N的比例")
        exit(-1)
    ranked = service_to_times.sort_values(ascending=False)
    if high_frequency_N != 0:
        ranked = ranked.head(high_frequency_N)
    aim_service_list = ranked.reset_index()["service"].tolist()
    # Keep only rows of the selected high-frequency services.
    df_all = df_all[df_all["service"].isin(aim_service_list)]

    # Sort rows within a trace by timestamp, then order traces by their
    # earliest timestamp, with ties broken by a per-trace random number.
    sorted_df = df_all.sort_values(by=['traceid', 'timestamp']).copy()
    sorted_groups = sorted_df.groupby('traceid').agg(MinTimestamp=('timestamp', 'min')).reset_index()
    sorted_groups = sorted_groups.sort_values(by='MinTimestamp')
    result = pd.merge(sorted_groups, sorted_df, on='traceid')
    result['random_num'] = result['traceid'].map(
        {traceid: np.random.rand() for traceid in result['traceid'].unique()})
    df_all = result.sort_values(by=['MinTimestamp', 'random_num', 'timestamp'])
    df_all = df_all.drop(columns=["MinTimestamp", 'random_num'])
    df_all.to_csv(f"{out_file_path}", index=False)

def clean_and_sample_and_split_one_process(percent, aim_service_list, tar_gz_file_path):
    """Worker: load one tar.gz archive, keep only rows whose service is
    in ``aim_service_list``, then clean and sample them.

    Terminates the worker process on MemoryError (the archive is too
    large to process with the available memory).
    """
    try:
        print(f"start deal {tar_gz_file_path}")
        frame = read_csv_from_tar_gz(tar_gz_file_path)
        frame = frame[frame["service"].isin(aim_service_list)]
        frame = sift_and_clean_data(frame, percent)
        print(f"end deal {tar_gz_file_path}")
    except MemoryError:
        print(f"内存不足！！！{tar_gz_file_path}")
        exit(-1)

    return frame
def save_to_file_one_process(service_name, pd_all_t, out_file_dir):
    """Worker: sort one service's traces and write them to
    ``out_file_dir + service_name + ".csv"``.

    Rows within a trace are sorted by timestamp; traces are ordered by
    their earliest timestamp, with ties broken by a per-trace random
    number. Failures are logged and swallowed (best-effort) so one bad
    service does not abort the whole batch.
    """
    try:
        print(f"start save {service_name}")
        df_spec_service = pd_all_t[pd_all_t["service"] == service_name]
        sorted_df = df_spec_service.sort_values(by=['traceid', 'timestamp']).copy()
        # Earliest timestamp per trace: the primary ordering key.
        sorted_groups = sorted_df.groupby('traceid').agg(MinTimestamp=('timestamp', 'min')).reset_index()
        sorted_groups = sorted_groups.sort_values(by='MinTimestamp')
        result = pd.merge(sorted_groups, sorted_df, on='traceid')
        # One random number per trace, used only to break timestamp ties.
        result['random_num'] = result['traceid'].map(
            {traceid: np.random.rand() for traceid in result['traceid'].unique()})
        df_spec_service = result.sort_values(by=['MinTimestamp', 'random_num', 'timestamp'])
        df_spec_service = df_spec_service.drop(columns=["MinTimestamp", 'random_num'])
        df_spec_service.to_csv(out_file_dir + service_name + ".csv", index=False)
        print(f"end save {service_name}")
    except Exception as e:
        # Was a bare `except:` with no detail; narrowed so SystemExit /
        # KeyboardInterrupt still propagate, and the error is reported.
        print(f"*****wrong with{service_name}: {e}")
    
    
    
    
def clean_and_sample_and_split_dataset_multi_processing(dataset_path, out_file_dir, frequency_threshold, percent, top_n_file_path, file_number, parallel_num):
    """Clean/sample every CallGraph archive and split the result into
    one CSV per service.

    Args:
        dataset_path: dataset root containing a 'CallGraph' directory.
        out_file_dir: directory (with trailing separator) for the
            per-service output CSVs; already-existing files are skipped.
        frequency_threshold: minimum occurrence count (second column of
            the top-N file) for a service to be processed.
        percent: per-archive sampling fraction.
        top_n_file_path: CSV whose first column is the service name and
            second column its occurrence count.
        file_number: number of archives to process (<=0 means all).
        parallel_num: worker process count for the clean/sample stage.
    """
    np.random.seed(2)

    # Determine which services still need processing: frequent enough
    # and without an existing output CSV.
    top_pd = pd.read_csv(top_n_file_path)
    aim_service_list = []
    for index, value in top_pd.iloc[:, 0].items():
        occurrences = top_pd.iloc[index, 1]
        if occurrences > frequency_threshold and not os.path.isfile(out_file_dir + value + ".csv"):
            aim_service_list.append(value)
    if not aim_service_list:
        print("文件已全部划分好！")
        return

    print(f"剩余没处理的service数量：{len(aim_service_list)}")
    # 'sub_dir' avoids shadowing the builtin dir().
    sub_dir = "CallGraph"
    call_graph_dir = os.path.join(dataset_path, sub_dir)

    df_all = pd.DataFrame()
    tar_file_name = list_tar_files(call_graph_dir)
    # Process newest archives first.
    tar_file_name.sort(reverse=True)
    print(f"需要处理的文件：{tar_file_name}")
    if file_number <= 0 or file_number > len(tar_file_name):
        file_number = len(tar_file_name)
    para_list = [
        (percent, aim_service_list, os.path.join(call_graph_dir, file))
        for file in tar_file_name[:file_number]
    ]

    with multiprocessing.Pool(processes=parallel_num) as pool:
        # Fan out one clean/sample task per archive.
        tasks = [pool.apply_async(clean_and_sample_and_split_one_process, para) for para in para_list]
        frames = [task.get() for task in tasks]
    if frames:
        df_all = pd.concat(frames)

    print("开始综合处理。。。")
    print(f"参数数量：{len(aim_service_list)}")
    # Write each service's traces to its own CSV, in parallel.
    with multiprocessing.Pool(processes=10) as pool:
        tasks_list = [
            pool.apply_async(
                save_to_file_one_process,
                (service_name, df_all[df_all["service"] == service_name], out_file_dir))
            for service_name in aim_service_list
        ]
        print("任务提交完成，等待结束！")
        # Wait for all writers to finish (re-raises worker errors here).
        for task in tasks_list:
            task.get()

    print("程序结束。")
    
# def clean_and_sample_dataset(percent,dataset_dir_t,seed, out_file_name, file_number,  top_n_service_file):
#     global parallel_num
#     np.random.seed(seed)
#     cur_dir=get_cur_dir()
#     out_file_path=f"dealed_data/{out_file_name}"
#     #判断文件是否已经存在
#     if os.path.exists(os.path.join(cur_dir,out_file_path)):
#         print(f"文件存在 ({out_file_path})")
#         return
    
#     dataset_dir=cur_dir+f"/{dataset_dir_t}"
#     dir = "CallGraph"
#     # 参数设置

#     df_all = pd.DataFrame()
#     tar_file_name=list_tar_files(os.path.join(dataset_dir,dir))
#     print(f"需要处理的文件：{tar_file_name}")
#     para_list=[]
#     if file_number<=0 or file_number>len(tar_file_name):
#         file_number=len(tar_file_name)
#     for file in tar_file_name[:file_number]:
#         tar_gz_file_path=os.path.join(dataset_dir,dir)+"/"+file
#         top_n_file_path=cur_dir+f"/dealed_data/{top_n_service_file}"
#         para_list.append((percent, top_n_file_path, tar_gz_file_path))
        
#     with multiprocessing.Pool(processes=parallel_num) as pool:
#         # 提交多个任务
#         tasks = [pool.apply_async(one_process, para) for para in para_list]    

#         for future in tasks:
#             df_temp=future.get()

#             if df_all.empty:
#                 df_all = df_temp
#             else:
#                 df_all = pd.concat([df_all, df_temp])
    
        
#     # 排序
#     # 按照 traceid 和 timestamp 排序组内数据
#     # 然后按照 Group 最小时间戳进行排序
#     sorted_df = df_all.sort_values(by=['traceid', 'timestamp']).copy()

#     # 获取每个组的最小时间戳并按组排序
#     sorted_groups = sorted_df.groupby('traceid').agg(MinTimestamp=('timestamp', 'min')).reset_index()
#     sorted_groups = sorted_groups.sort_values(by='MinTimestamp')

#     # 将排序后的组与原数据合并
#     result = pd.merge(sorted_groups, sorted_df, on='traceid')
#     result['random_num'] = result['traceid'].map({traceid: np.random.rand() for traceid in result['traceid'].unique()})
#     # 根据最小时间戳和 Timestamp 进行最终排序
#     df_all = result.sort_values(by=['MinTimestamp', 'timestamp','random_num'])
#     df_all = df_all.drop(columns=["MinTimestamp",'random_num'])
#     df_all.to_csv(f"{out_file_path}", index=False)


if __name__ == "__main__":
    # Fixed: the guard previously compared against "_main_" (single
    # underscores), which never matches, so this block could not run.
    # Random seed for reproducible sampling.
    seed = 10
    percent = 0.005
    dataset_dir = "dataset_0d11_0d12"

    data_kind = "history"
    out_file_name = f"CallGraph_cleaned_v1_{percent}_s{seed}_{data_kind}.csv"
    # clean_and_sample_dataset(percent, dataset_dir, seed, out_file_name)