from util import *
import pandas as pd
import numpy as np
import sys
sys.path.append(f"{get_root_dir()}/MS_system")
from Request import *
from RequestGenerator import *
import pickle
from concurrent.futures import ThreadPoolExecutor
import threading
import time

# Pandas display options: disable all truncation so DataFrames print in full.
pd.set_option('display.max_rows', None)      # show every row
pd.set_option('display.max_columns', None)   # show every column
pd.set_option('display.width', None)         # no fixed console width
pd.set_option('display.max_colwidth', None)  # show full cell contents

        
def get_base_one_process(service_name_list, input_file_data_path):
    """Build one merged "base" request per service in ``service_name_list``.

    Replays the trace file through a ``RequestGenerator``: the first request
    seen for a service becomes its base (``reset_as_base``); every later
    request for the same service is merged into that base. After the replay,
    each base is finalized and its statistics are refreshed.

    Args:
        service_name_list: service names this worker is responsible for.
        input_file_data_path: path of the trace CSV to replay.

    Returns:
        dict mapping service name -> merged base request.
    """
    print(f"start deal {len(service_name_list)}")
    request_base_dict = {}

    request_generator = RequestGenerator()
    request_generator.init_for_analyze(input_file_data_path)
    while not request_generator.is_end():
        request = request_generator.generate_request(service_name_list)
        if request is None:
            continue
        base = request_base_dict.get(request.service_name)
        if base is None:
            # First occurrence of this service: keep it as the base.
            request.reset_as_base()
            request_base_dict[request.service_name] = request
        else:
            base.merge(request)
    for base in request_base_dict.values():
        base.merge_end()
        base.update_request_statistic_info()
    # BUG FIX: this function runs in a multiprocessing worker, where the
    # module-level ``pbar`` is None (it is only created in the parent), so the
    # previous unconditional pbar.update(1) raised AttributeError under the
    # "spawn" start method. Guard it; under "fork" the update only touched the
    # child's copy of the bar anyway.
    if pbar is not None:
        pbar.update(1)
    print(f"end deal {len(service_name_list)}")
    return request_base_dict

from tqdm import tqdm
import multiprocessing 
import math
# Module-level progress bar handle. The statistic_* entry points assign a
# tqdm instance to it in the coordinating process; it stays None everywhere
# else (notably inside worker processes), so users must check for None.
# NOTE: the previous ``global pbar`` statement at module level was a no-op
# (``global`` only has meaning inside a function body) and was removed.
pbar = None

def statistic_and_get_base_request_multi_processing(input_file_data_path, top_n_file_path, parallel_num):
    """Compute the merged base request of every service, in parallel.

    The service list either comes from the ``service`` column of
    ``top_n_file_path`` (if given) or from the distinct services found in the
    trace itself. Services are chunked into ~100 groups, each group is handed
    to ``get_base_one_process`` in a worker process, and the merged results
    are pickled to ``input_file_data_path + ".base"``. Does nothing if that
    output file already exists.

    Args:
        input_file_data_path: path of the trace CSV to analyze.
        top_n_file_path: optional CSV whose ``service`` column selects the
            services to analyze; None means "all services in the trace".
        parallel_num: number of worker processes.
    """
    global pbar
    out_file_path = input_file_data_path + ".base"
    if os.path.exists(out_file_path):
        print(f"文件存在！({out_file_path})")
        return
    if top_n_file_path is None:
        top_n_service_list = list(set(pd.read_csv(input_file_data_path)["service"].unique()))
        print(top_n_service_list)
    else:
        top_pd = pd.read_csv(top_n_file_path)
        top_n_service_list = top_pd["service"].tolist()
    service_to_callgraph_dict = {}
    pbar = tqdm(total=len(top_n_service_list))
    group_num = 100
    # max(1, ...) guards the empty-list case (step 0 would raise ValueError).
    each_group_num = max(1, math.ceil(len(top_n_service_list) / group_num))
    # BUG FIX: the previous flush-on-boundary loop never appended the current
    # service on the flush branch, silently dropping one service per group
    # plus the final service. Exhaustive slicing covers every service exactly
    # once.
    para_list = [
        (top_n_service_list[i:i + each_group_num], input_file_data_path)
        for i in range(0, len(top_n_service_list), each_group_num)
    ]

    with multiprocessing.Pool(processes=parallel_num) as pool:
        # Fan out one task per service group.
        tasks = [pool.apply_async(get_base_one_process, para) for para in para_list]

        for future in tasks:
            # get() blocks until the worker finishes and re-raises its errors.
            request_base_dict = future.get()
            service_to_callgraph_dict.update(request_base_dict)

    # Print services ordered by call count, descending.
    sorted_service_name = sorted(service_to_callgraph_dict, key=lambda p: service_to_callgraph_dict[p].request_call_num, reverse=True)
    for service_name in sorted_service_name:
        print(f"{service_name}:{service_to_callgraph_dict[service_name].request_call_num}", end="\t")

    print(f"\nservice name list:{service_to_callgraph_dict.keys()}")
    with open(out_file_path, "wb") as file:
        pickle.dump(service_to_callgraph_dict, file)
    
def get_multi_info_one_process(service_name, input_file_data_dir, out_file_data_dir):
    """Analyze one service's trace file and pickle its statistics.

    Reads ``<input_file_data_dir><service_name>.csv`` and, considering only
    requests with more than one task, collects:

    * ``part_dict``        — histogram of ``request.part_percent`` (2 dp);
    * ``new_dynamic_dict`` — histogram of request size as a fraction of the
                             merged base request's full size (2 dp);
    * ``base``             — all qualifying requests merged into one base.

    The result dict is pickled to ``<out_file_data_dir><service_name>.pkl``.
    Note: the directory arguments are concatenated directly, so they are
    expected to end with a path separator.
    """
    print(f"start deal {service_name}")
    input_file_data_path = input_file_data_dir + service_name + ".csv"
    request_base = None

    request_generator = RequestGenerator()
    request_generator.init_for_analyze(input_file_data_path)

    part_dict = {}
    dynamic_dict = {}
    full_size = 0
    all_request_num = 0

    while not request_generator.is_end():
        request = request_generator.generate_request([service_name])
        # Single-task requests carry no call-graph structure: skip them.
        if request is not None and request.all_task_num > 1:
            all_request_num += 1
            # Record the "part" ratio, bucketed at 2 decimal places.
            part_percent = round(request.part_percent, 2)
            part_dict[part_percent] = part_dict.get(part_percent, 0) + 1
            # Record the request size (total task count).
            size = request.all_task_num
            dynamic_dict[size] = dynamic_dict.get(size, 0) + 1

            if request_base is None:
                request.reset_as_base()
                request_base = request
            else:
                request_base.merge(request)

    # Final statistics.
    if request_base is None:
        # No multi-task request was seen: emit empty statistics.
        all_request_num = 0
        full_size = 0
        part_dict = {}
        new_dynamic_dict = {}
    else:
        request_base.merge_end()
        request_base.update_request_statistic_info()
        full_size = request_base.all_task_num
        # Re-bucket absolute sizes as fractions of the merged full size.
        new_dynamic_dict = {}
        for size, count in dynamic_dict.items():
            dynamic = round(size / full_size, 2)
            new_dynamic_dict[dynamic] = new_dynamic_dict.get(dynamic, 0) + count

    out_file_path_name = out_file_data_dir + service_name + ".pkl"
    storage_data = {
        "all_request_num": all_request_num,
        "full_size": full_size,
        "part_dict": part_dict,
        "new_dynamic_dict": new_dynamic_dict,
        "base": request_base,
    }
    with open(out_file_path_name, "wb") as file:
        pickle.dump(storage_data, file)

    print(f"end deal {service_name}")
    return

def statistic_and_get_multi_info_multi_processing(input_file_data_dir, out_file_data_dir, parallel_num):
    """Run ``get_multi_info_one_process`` for every pending service in parallel.

    Scans ``input_file_data_dir`` for per-service ``.csv`` trace files, skips
    services whose ``.pkl`` output already exists in ``out_file_data_dir``,
    and processes the remainder (smallest files first) with a process pool,
    advancing a tqdm bar in this (parent) process as results arrive.

    Args:
        input_file_data_dir: directory of per-service trace CSVs
            (expected to end with a path separator; see the worker).
        out_file_data_dir: directory receiving the per-service ``.pkl`` files.
        parallel_num: number of worker processes.
    """
    global pbar

    file_name_list = os.listdir(input_file_data_dir)
    aim_service_list = []
    for file_name in file_name_list:
        # BUG FIX: split(".")[0] truncated service names containing dots,
        # which made the worker look for the wrong .csv file. splitext only
        # strips the final extension.
        service_name = os.path.splitext(file_name)[0]
        if not os.path.isfile(os.path.join(out_file_data_dir, service_name + ".pkl")):
            file_size = os.path.getsize(os.path.join(input_file_data_dir, file_name)) / (1024 * 1024)
            print(f"service:{service_name}, size:{file_size}MB")
            # if file_size>=1:
            aim_service_list.append((service_name, file_size))
    if not aim_service_list:
        print("all service get base done")
        return

    # Smallest traces first, so short jobs finish and free workers early.
    aim_service_list.sort(key=lambda x: x[1])

    print(f"need done num:{len(aim_service_list)}")
    pbar = tqdm(total=len(aim_service_list))
    para_list = [
        (service_name, input_file_data_dir, out_file_data_dir)
        for service_name, _ in aim_service_list
    ]

    with multiprocessing.Pool(processes=parallel_num) as pool:
        # Fan out one task per service.
        tasks = [pool.apply_async(get_multi_info_one_process, para) for para in para_list]

        for future in tasks:
            # The worker returns nothing useful; get() blocks until it
            # finishes and re-raises any worker-side exception.
            future.get()
            pbar.update(1)
    
        


if __name__=="__main__":    
    
    file_path="CallGraph_cleaned_0.005_s10_history.csv"

    thread_num=20
    # statistic_and_get_base_request_multi_processing(file_path, file_path+".var_new",5,thread_num)

    # Known service info: the service names, given as a list.

    # Per-service call-graph info: each call's pre/deal/aft times, plus its probability.
    
# def get_service_name_list(csv_file_path):
#     pd_all=pd.read_csv(csv_file_path)
#     service_name_list=pd_all['service'].unique()
#     print(f"服务名称: {service_name_list}")

    
# def statistic_and_get_base_request(input_file, out_put_file, high_frequency_N):
#     data_file_dir=os.path.join(get_cur_dir(),"dealed_data")
#     if os.path.exists(os.path.join(data_file_dir,out_put_file)):
#         print(f"文件存在！({out_put_file})")
#         return
#     service_to_callgraph_dict={}
#     request_generator=RequestGenerator()
#     request_generator.init_for_analyze(os.path.join(data_file_dir,input_file))
    
    
#     while not request_generator.is_end_for_analyze():
#         request=request_generator.generate_request()
#         if request==None:
#             continue
#         if request.service_name not in service_to_callgraph_dict.keys():
#             request.reset_as_base()
#             service_to_callgraph_dict[request.service_name]=request
#         else:
#             base_request=service_to_callgraph_dict[request.service_name]
#             base_request.merge(request)
            
#     for request in service_to_callgraph_dict.values():
#         request.merge_end()
#         request.update_all_sub_task_num()
#     #根据调用次数降序排序
#     sorted_service_name = sorted(service_to_callgraph_dict, key=lambda p: service_to_callgraph_dict[p].request_call_num, reverse=True)
#     selected_service_name=sorted_service_name[:high_frequency_N]
#     for service_name in sorted_service_name:
#         if service_name not in selected_service_name:
#             del service_to_callgraph_dict[service_name]
#         else:
#             print(f"service name:{service_name}, call num:{service_to_callgraph_dict[service_name].request_call_num}")
            
#     print(f"service name list:{service_to_callgraph_dict.keys()}")
#     with open(os.path.join(data_file_dir,out_put_file), "wb") as file:
#         pickle.dump(service_to_callgraph_dict, file)