from util import *
import pandas as pd
import multiprocessing


def request_number_one_process(tar_gz_file_path, S_or_MS):
    """Pool worker: load one CallGraph tar.gz and project it to two columns.

    Parameters
    ----------
    tar_gz_file_path : str
        Path to a CallGraph_*.tar.gz archive, readable by
        ``read_csv_from_tar_gz`` (project helper from ``util``).
    S_or_MS : str
        Name of the granularity column to keep next to ``traceid``
        (callers pass "service" or "dm").

    Returns
    -------
    pandas.DataFrame
        Frame with exactly the columns ``[S_or_MS, "traceid"]``.
    """
    # Progress markers: these run inside pool workers, so they show which
    # archive each process is currently handling.
    print(f"start deal {tar_gz_file_path}")
    df_temp = read_csv_from_tar_gz(tar_gz_file_path)
    print(f"end deal {tar_gz_file_path}")
    return df_temp[[S_or_MS, "traceid"]]
    
    
    
# Count per-service / per-microservice request frequency (multi-process).
def statistic_service_request_number_multi_processing(dataset_path, out_file_path, high_frequency_N, file_number, parallel_num, S_or_MS="service"):
    """Aggregate request counts across CallGraph archives using a process pool.

    Reads up to ``file_number`` CallGraph_*.tar.gz files under
    ``dataset_path/CallGraph`` in parallel (``parallel_num`` workers),
    then writes a CSV ``[S_or_MS, call_num]`` sorted by ``call_num``
    descending to ``out_file_path``. Does nothing if the output file
    already exists.

    Parameters
    ----------
    dataset_path : str
        Root directory containing the ``CallGraph`` subdirectory.
    out_file_path : str
        Destination CSV path; acts as a completion marker.
    high_frequency_N : int
        If > 0, keep only the top-N rows by call_num.
    file_number : int
        Number of archives to process; <= 0 or too large means "all".
    parallel_num : int
        Worker-process count for the multiprocessing pool.
    S_or_MS : str
        "service" (count distinct traceids per service) or another column
        such as "dm" (count raw call rows per microservice).
    """
    # Skip recomputation if the result file is already present.
    if os.path.exists(out_file_path):
        print(f"{S_or_MS} 调用次数信息已统计，文件存在 ({out_file_path})")
        return
    print("开始统计请求次数。（statistic_service_request_number_multi_processing）")

    callgraph_dir = os.path.join(dataset_path, "CallGraph")
    tar_file_names = list_tar_files(callgraph_dir)
    if file_number <= 0 or file_number > len(tar_file_names):
        file_number = len(tar_file_names)
    para_list = [(os.path.join(callgraph_dir, name), S_or_MS)
                 for name in tar_file_names[:file_number]]

    with multiprocessing.Pool(processes=parallel_num) as pool:
        # Submit all archives, then collect results in submission order.
        tasks = [pool.apply_async(request_number_one_process, para) for para in para_list]
        frames = [task.get() for task in tasks]
    # Concatenate once instead of inside the loop — repeated pd.concat on a
    # growing frame is quadratic in total rows.
    if frames:
        df_all = pd.concat(frames)
    else:
        df_all = pd.DataFrame(columns=[S_or_MS, "traceid"])

    if S_or_MS == "service":
        # One request == one distinct traceid touching the service.
        result = df_all.groupby(S_or_MS)["traceid"].nunique().reset_index()
    else:
        # Microservice level: every call row counts.
        result = df_all.groupby(S_or_MS).size().reset_index()
    result.columns = [S_or_MS, 'call_num']
    if S_or_MS != "service":
        # Drop placeholder callee names present in the trace data.
        result = result.loc[~result[S_or_MS].isin(["UNKNOWN", "UNAVAILABLE"])]

    result = result.sort_values('call_num', ascending=False)
    if high_frequency_N > 0:
        result = result.head(high_frequency_N)
    result.to_csv(out_file_path, index=False)
    print("结束统计请求次数。（statistic_service_request_number_multi_processing）")


def statistic_ms_call_number_for_specific_file(input_file_path, output_file_path):
    """Count occurrences of each 'dm' value in a CSV file.

    Reads only the ``dm`` column of ``input_file_path`` and writes a CSV
    with columns ``[dm, count]`` (sorted by count, descending) to
    ``output_file_path``. Skips the computation entirely when the output
    file already exists.
    """
    # Guard clause: output acts as a completion marker.
    if os.path.exists(output_file_path):
        print(f"文件存在 ({output_file_path})")
        return
    print("开始统计请求次数。（statistic_ms_call_number_for_specific_file）")

    dm_column = pd.read_csv(input_file_path, usecols=['dm'])['dm']
    frequency = dm_column.value_counts().reset_index()
    frequency.columns = ['dm', 'count']
    frequency.to_csv(output_file_path, index=False)
    
    
    
    
# Count request frequency over ALL tar archives (single-process, slow).
def statistic_service_request_number(S_or_MS):
    """Single-process aggregation of request counts over every CallGraph archive.

    Reads every tar.gz under ``<parent of root>/MS_data/data/CallGraph``,
    groups by ``S_or_MS`` and writes the result to
    ``get_datas_dir()/statistic_{S_or_MS}_request_number.csv``.

    Parameters
    ----------
    S_or_MS : str
        "service" → count distinct traceids per service
        (column ``unique_traceid_count``);
        "dm" → count call rows per microservice (column ``traceid_count``).

    Raises
    ------
    ValueError
        If ``S_or_MS`` is neither "service" nor "dm".
    """
    root_dir = get_root_dir()
    dataset_dir = os.path.dirname(root_dir) + "/MS_data/data"
    callgraph_dir = os.path.join(dataset_dir, "CallGraph")

    # Collect per-archive frames and concatenate once — appending with
    # pd.concat inside the loop is quadratic in total rows.
    frames = []
    for file in list_tar_files(callgraph_dir):
        print(file)
        df_temp = read_csv_from_tar_gz(os.path.join(callgraph_dir, file))
        frames.append(df_temp[[S_or_MS, "traceid"]])
    if frames:
        df_all = pd.concat(frames)
    else:
        df_all = pd.DataFrame(columns=[S_or_MS, "traceid"])

    if S_or_MS == "service":
        result = df_all.groupby(S_or_MS)["traceid"].nunique().reset_index()
        result.columns = [S_or_MS, 'unique_traceid_count']
    elif S_or_MS == "dm":
        result = df_all.groupby(S_or_MS)["traceid"].count().reset_index()
        result.columns = [S_or_MS, 'traceid_count']
    else:
        # Previously `result` was left unbound here, causing a NameError
        # at the to_csv call below; fail fast with a clear message instead.
        raise ValueError(f"unsupported S_or_MS: {S_or_MS!r} (expected 'service' or 'dm')")

    result.to_csv(get_datas_dir() + f"/statistic_{S_or_MS}_request_number.csv", index=False)
    print(result)
    
    
if __name__ == "__main__":
    # Granularity for the single-process statistic: "service" or "dm".
    granularity = "service"
    statistic_service_request_number(granularity)







