from util import *
import pandas as pd
import numpy as np





def get_unique_value():
    """Print the unique values of the ``rpctype`` column of one CallGraph CSV.

    Exploratory helper; reads a hard-coded file path.
    """
    path = "F:/data/data/"
    dir = "CallGraph"
    # `error_bad_lines=False` was removed in pandas 2.0; `on_bad_lines="skip"`
    # (available since pandas 1.3) gives the same skip-malformed-rows behavior.
    data = pd.read_csv(path+dir+"/CallGraph_20.csv", on_bad_lines="skip")
    unique_value = data["rpctype"].unique()
    print(unique_value)


def sift_and_clean_data(df_temp, percent):
    """Sample a fraction of traces from *df_temp* and drop malformed ones.

    Steps:
      1. Drop columns the rest of the pipeline never reads.
      2. Randomly keep ``percent`` of the trace groups (grouped by ``traceid``).
      3. Drop every trace containing an ``UNKNOWN`` caller (``um``) or
         callee (``dm``).
      4. Apply :func:`process_group` per trace to enforce per-trace validity
         (no nulls, a USER/http root call, consistent response times).

    Parameters
    ----------
    df_temp : pandas.DataFrame
        Raw call-graph rows; expected to contain at least ``traceid``, ``um``,
        ``dm``, ``rpc_id``, ``rpctype``, ``timestamp`` and ``rt`` columns.
    percent : float
        Fraction (0..1) of traces to sample.

    Returns
    -------
    pandas.DataFrame
        The sampled and cleaned rows.
    """
    # errors="ignore" keeps this robust against input files that lack one of
    # these columns (the original raised KeyError in that case).
    drop_column_names = ["uminstanceid", "interface", "dminstanceid"]
    df_temp = df_temp.drop(columns=drop_column_names, errors="ignore")

    # Randomly sample `percent` of the trace ids.  Enumerating unique ids is
    # cheaper than materializing a full groupby just to read its keys.
    trace_ids = df_temp['traceid'].unique()
    selected_groups = np.random.choice(trace_ids, size=int(len(trace_ids) * percent), replace=False)
    print(f"selected_groups:{selected_groups}")
    # Keep only the rows belonging to a sampled trace.
    df_temp = df_temp[df_temp['traceid'].isin(selected_groups)]

    # Drop whole traces whose caller (um) or callee (dm) contains UNKNOWN.
    for col in ('um', 'dm'):
        groups_with_unknown = df_temp[df_temp[col].str.contains('UNKNOWN', na=False)]['traceid'].unique()
        df_temp = df_temp[~df_temp['traceid'].isin(groups_with_unknown)]

    # Per-trace validation/filtering; see process_group for the rules applied
    # (null check, USER/http root presence, response-time consistency).
    df_temp = df_temp.groupby('traceid', group_keys=False).apply(process_group)

    return df_temp


# Per-trace (per-group) validation logic used by sift_and_clean_data.
def process_group(group):
    """Validate one trace (one ``traceid`` group); return it or an empty frame.

    A trace is dropped (empty frame returned) when any of the following holds:
      * it contains a null value or the literal string ``"None"`` anywhere;
      * it has no root row with ``um == 'USER'``, ``rpc_id == '0'`` and
        ``rpctype == 'http'``;
      * any row outside the mq sub-trees finishes (``timestamp + rt``) after
        the root call finishes.

    Parameters
    ----------
    group : pandas.DataFrame
        Rows of a single trace.

    Returns
    -------
    pandas.DataFrame
        The trace with ``timestamp``/``rt`` cast to float, or an empty frame.
    """
    # Traces with missing values or the string "None" are discarded outright.
    if group.isnull().values.any() or (group == "None").any().any():
        return group[0:0]  # empty frame -> group is dropped by groupby.apply

    # Work on a copy: the original assigned into the frame handed over by
    # groupby.apply, which triggers SettingWithCopyWarning and can mutate the
    # caller's data.
    group = group.copy()
    group['timestamp'] = group['timestamp'].astype(float)
    group['rt'] = group['rt'].astype(float)

    # The trace root: the USER-issued http call with rpc_id '0'.
    condition = (group['um'] == 'USER') & (group['rpc_id'].isin(['0'])) & (group['rpctype'] == 'http')
    selected_rows = group[condition]

    # No root call -> the trace is incomplete; drop it.
    if selected_rows.empty:
        return group[0:0]

    # Completion time of the root call.
    max_rt = (selected_rows['timestamp'] + selected_rows['rt']).max()

    # rpc_ids of mq rows; rows whose rpc_id starts with one of these belong to
    # mq sub-trees, which are allowed to outlive the root call.
    # NOTE(review): plain string-prefix matching also matches e.g. '0.10' for
    # prefix '0.1' -- confirm the rpc_id encoding makes this safe.
    mq_rpc_ids = group[group['rpctype'] == 'mq']['rpc_id'].unique()
    mq_rpc_id_prefixes = [str(rpc_id) for rpc_id in mq_rpc_ids]

    # Restrict the timing check to the non-mq part of the trace.  An empty
    # prefix tuple matches nothing, so traces without mq rows keep every row.
    condition_to_keep = ~group['rpc_id'].astype(str).str.startswith(tuple(mq_rpc_id_prefixes))
    filtered_group = group[condition_to_keep]

    # Any non-mq row finishing after the root call means inconsistent timing
    # -> drop the whole trace.
    if ((filtered_group['timestamp'] + filtered_group['rt']) > max_rt).any():
        return group[0:0]

    return group  # trace passed every check

def get_specific_tracedata(percent):
    """Build a cleaned, time-ordered CallGraph CSV from the raw tar archives.

    Reads the first two CallGraph tar.gz archives, samples/cleans each with
    :func:`sift_and_clean_data`, orders traces by their earliest timestamp
    (rows inside a trace ordered by timestamp), and writes the result to
    ``CallGraph_cleaned_<percent>.csv``.

    Parameters
    ----------
    percent : float
        Fraction (0..1) of traces to keep; forwarded to sift_and_clean_data.
    """
    path = "F:/data/data/"
    dir = "CallGraph"
    # np.random.seed(2)  # uncomment for reproducible sampling

    # Collect the per-file frames and concatenate once at the end --
    # concatenating inside the loop re-copies the accumulated data each pass.
    frames = []
    tar_file_name = list_tar_files(path+dir)
    for file in tar_file_name[:2]:
        print(file)
        df_temp = read_csv_from_tar_gz(path+dir+"/"+file)
        frames.append(sift_and_clean_data(df_temp, percent))

    df_all = pd.concat(frames) if frames else pd.DataFrame()
    if df_all.empty:
        # Nothing survived sampling/cleaning; the sorts below would raise on
        # missing columns, so write the empty result and stop here.
        df_all.to_csv(f"CallGraph_cleaned_{percent}.csv", index=False)
        return

    # Sort rows inside each trace by timestamp.
    sorted_df = df_all.sort_values(by=['traceid', 'timestamp']).copy()

    # Earliest timestamp per trace, used to order the traces themselves.
    sorted_groups = sorted_df.groupby('traceid').agg(MinTimestamp=('timestamp', 'min')).reset_index()
    sorted_groups = sorted_groups.sort_values(by='MinTimestamp')

    # Attach each row to its trace's minimum timestamp...
    result = pd.merge(sorted_groups, sorted_df, on='traceid')

    # ...then produce the final ordering: trace start time first, row time second.
    df_all = result.sort_values(by=['MinTimestamp', 'timestamp'])
    df_all = df_all.drop(columns=["MinTimestamp"])
    df_all.to_csv(f"CallGraph_cleaned_{percent}.csv", index=False)





if __name__ == "__main__":
    # Run the pipeline only when executed as a script, not when imported.
    get_specific_tracedata(0.05)