import os
from util import *
from CallGraph_clean_and_sample import *
from CallGraph_statistic_base import *
from CallGraph_request_number import *



def statistic_pkl(out_file_data_path_t, out_file_path):
    """Aggregate per-service pkl statistics into a CSV sorted by request count.

    Reads every ``<service>.pkl`` file under *out_file_data_path_t* (each a
    pickled dict with keys ``base``, ``all_request_num``, ``full_size``,
    ``part_dict`` and ``new_dynamic_dict``), writes one CSV row per service
    to *out_file_path*, then re-reads the CSV and rewrites it sorted by the
    " all request num" column in descending order.

    Side effects: prints the number of distinct microservice kinds and the
    average task deal time; services with ``all_request_num == 0`` are
    counted toward those totals but omitted from the CSV.
    """
    file_name_list = os.listdir(out_file_data_path_t)
    # Collect rows in a list and join once — avoids quadratic `str +=`.
    rows = ["service, all request num, full size, part dict, dynamic dict, ms kind num\n"]
    all_deal_time = 0
    task_num = 0
    all_ms_kind_set = set()
    for file_name in file_name_list:
        file_path = os.path.join(out_file_data_path_t, file_name)
        service_name = file_name.split(".")[0]
        with open(file_path, "rb") as fh:
            storage_data = pickle.load(fh)
        request = storage_data["base"]
        all_request_num = storage_data["all_request_num"]
        full_size = storage_data["full_size"]
        part_dict_string = "".join(
            f"({key}:{value})" for key, value in storage_data["part_dict"].items()
        )
        dynamic_dict_string = "".join(
            f"({key}:{value})" for key, value in storage_data["new_dynamic_dict"].items()
        )

        # BUG FIX: ms_kind_all was previously assigned only when request was
        # not None, so a service without a base raised NameError on the first
        # file or reused the previous iteration's value in its CSV row.
        ms_kind_all = ""
        if request is not None:
            for ms_kind in request.ms_kind_list:
                ms_kind_all = ms_kind_all + "|" + ms_kind
                all_ms_kind_set.add(ms_kind)
            for task in request.task_id_to_obj.values():
                all_deal_time += task.ave_deal_time
                task_num += 1

        # Services that received no requests are tallied above but not listed.
        if all_request_num == 0:
            continue
        rows.append(
            f"{service_name}, {all_request_num}, {full_size}, {part_dict_string}, {dynamic_dict_string}, {ms_kind_all}\n"
        )
    print(f"所有微服务类型的数量：{len(all_ms_kind_set)}")
    # BUG FIX: guard against ZeroDivisionError when no tasks were observed.
    average_deal_time = all_deal_time / task_num if task_num else 0
    print(f"平均处理时长：{average_deal_time}")
    with open(out_file_path, "w") as fh:
        fh.write("".join(rows))

    # Round-trip through pandas to sort rows by request count, descending.
    pd_all = pd.read_csv(out_file_path)
    pd_new = pd_all.sort_values(by=" all request num", ascending=False)
    pd_new.to_csv(out_file_path, index=False)
        
def get_combine_service_info(frequncy,all_pkl_info_path,sift_service_path, pkl_dir, base_path):
    """Combine the ``base`` objects of frequently-requested services.

    Loads the pkl-statistics CSV at *all_pkl_info_path*, keeps services whose
    " all request num" is at least *frequncy*, loads each kept service's
    ``base`` from ``<pkl_dir>/<service>.pkl``, then pickles the resulting
    {service_name: base} dict to *base_path* and writes the kept
    (service, request-count) table to *sift_service_path*.
    """
    info_df = pd.read_csv(all_pkl_info_path)
    print(f"长度：{len(info_df)}")

    kept_df = info_df[info_df[" all request num"] >= frequncy]
    kept_df = kept_df[["service", " all request num"]]

    name_to_base = {}
    for _, record in kept_df.iterrows():
        svc = record["service"]
        with open(f"{pkl_dir}/{svc}.pkl", "rb") as handle:
            name_to_base[svc] = pickle.load(handle)["base"]

    # Persist the combined base objects.
    with open(base_path, "wb") as handle:
        pickle.dump(name_to_base, handle)
    # Persist the sifted service-name table.
    kept_df.to_csv(sift_service_path, index=False)

    return

start_time=time.time()
parallel_num=10  # default worker-process count for the multiprocessing steps

# current_time_str=get_time_str()
# current_time_str="07-30-09-01-43"
current_time_str="common"  # fixed tag embedded in output file names
file_number=0  # 0 presumably means "process all files" — TODO confirm against callees
cur_dir=get_cur_dir()
out_path="/root/autodl-tmp/"  # root directory for all generated artifacts
# Steps to run this invocation; full menu is listed in the trailing comment.
step_list=[ "get_pkl","statistic_and_sift"]#"download_data", "get_frequancy", "split_service",,"get_pkl", "statistic_and_sift"
# step 1: download the dataset
start_date="0d11"
end_date="0d12"
dataset_dir_name=f"dataset_{start_date}_{end_date}"
dataset_path=out_path+dataset_dir_name
if "download_data" in step_list:
    down_load_dataset(dataset_path, start_date, end_date)


# step 2: from the raw data, count the request number of each service
# (used to reduce file splitting — services called fewer than 100 times can
# be skipped in later steps)
high_frequency_N=0
out_file_name_call_num=f"CallGraph_{dataset_dir_name}_CallStop{high_frequency_N}_{current_time_str}.csv"
out_file_path_topcall=out_path+out_file_name_call_num
if "get_frequancy" in step_list:
    statistic_service_request_number_multi_processing(dataset_path, out_file_path_topcall, high_frequency_N, file_number,parallel_num)


# step 3: split the data into independent files keyed by service
# (~60G of data -> 5-way parallelism)
# Memory is limited, so the threshold is applied in several passes:
# 70000, 20000, 3600, 100.
frequency_threshold=100
percent=1
out_file_dir_name=dataset_dir_name+"_service_split/"
out_file_dir_path=ensure_directory_exists(out_path, out_file_dir_name)+"/"
if "split_service" in step_list:
    clean_and_sample_and_split_dataset_multi_processing(dataset_path,  out_file_dir_path, frequency_threshold, percent, top_n_file_path=out_file_path_topcall, file_number=file_number,parallel_num=7)


# step 4: preprocess the call graphs obtained above into pkl files that
# contain the "base" objects (this step is relatively slow)
input_file_data_path=out_file_dir_path
out_file_data_path=ensure_directory_exists(out_path, dataset_dir_name+"_pkl/")
if "get_pkl" in step_list:
    statistic_and_get_multi_info_multi_processing(input_file_data_path, out_file_data_path, parallel_num=32)

# step 5: aggregate the pkls and combine the target services' bases, producing
# a CSV of service names used later to generate validation data
frequncy=100
pkl_dir=out_path+dataset_dir_name+"_pkl"
all_pkl_info_path= f"dealed_data/statistic_pkl_{dataset_dir_name}.csv"
sift_service_path=f"dealed_data/statistic_pkl_sift_{dataset_dir_name}.csv"
base_path="dealed_data/"+dataset_dir_name+".base"
if "statistic_and_sift" in step_list:
    statistic_pkl(pkl_dir,all_pkl_info_path)
    get_combine_service_info(frequncy,all_pkl_info_path,sift_service_path, pkl_dir, base_path)
print(f"持续时间：{time.time()-start_time}")