import simpy
import math
import os
import copy
import pickle
from util import *
import pandas as pd
import numpy as np
from queue import PriorityQueue,Queue
from BareMetalNode import BareMetalNode
from MicroService import MicroService
from Scheduler import Scheduler
from Request import Request
from RequestGenerator import RequestGenerator
import random
import time
import datetime
import gc


# Configure pandas console display: show every column, every row, wide output.
for _display_option, _display_value in (
    ("display.max_columns", None),
    ("display.width", 1000),
    ("display.max_rows", None),
):
    pd.set_option(_display_option, _display_value)
# np.random.seed(55)
# random.seed(55)

class CloudSystem:
    """Discrete-event (simpy) simulation of a cloud microservice system.

    Owns the bare-metal nodes, the scheduler, the request generator and all
    bookkeeping/statistics collected during a simulation run. Collaborators
    (`Scheduler`, `RequestGenerator`, `BareMetalNode`, `Request`, task objects)
    are project-local classes; their contracts are inferred from usage here.
    """

    # Constructor
    def __init__(self,args):
        """Build the simulation world and all bookkeeping state from *args*.

        *args* is an argument/config object; every attribute read from it is
        referenced as ``self.args.<name>`` throughout this class.
        """
        self.args=args
        self.print_level=args.print_level
        
        # Bare-metal server instances, kept in a list for indexed access.
        self.bare_metal_node_list=[]
        # Queue of tasks waiting to be scheduled.
        self.pending_task_queue=[]
        # Requests that returned but are not fully finished; handled at the end.
        self.not_finished_request_list=[]
        # Textual info records, one per request.
        self.request_info_list=[]
        # Samples of the total microservice count over time.
        self.ms_number_list=[]
        # Samples of the average remaining resources per node over time.
        self.rest_resource_list=[]
        # Response times collected per service name.
        self.service_to_response_time={}
        # Total number of requests.
        self.all_request_num=0
        # Total number of returned requests.
        self.back_all_num=0
        
        self.all_task_num=0
        self.all_task_replica_num=0
        # Number of rejected requests.
        self.reject_num=0
        # Number of successfully returned requests.
        self.back_suc_request_num=0
        # Number of failed returned requests.
        self.back_fail_request_num=0
        # Number of requests whose response time met the deadline.
        self.deadline_suc_request_num=0
        # Number of requests that violated the reliability requirement.
        self.violation_of_reliability_request_num=0
        # Number of requests that violated both requirements.
        self.violation_of_both_request_num=0
        # Cold-start time per microservice kind (milliseconds, presumably —
        # TODO confirm the time unit against stable_random_by_key / the trace).
        self.cold_start_time={}
        self.cold_start_time_min=83
        self.cold_start_time_max=1100
        # Idle-time bookkeeping, keyed by microservice kind.
        self.ms_kind_to_number={}
        self.ms_kind_to_time_add_delete={}
        self.ms_kind_to_transient_lambda={}
        self.ms_kind_to_resource_need={}
        self.ms_kind_to_call_cost={}
        self.response_time_list=[]
        self.idle_time_list_for_ms_kind={}
        self.idle_start_time_for_ms_kind={}
        self.spec_ms_spec_time={}     #ms->(work_time, idle_time, all_time)
        self.spec_req_spec_time={}
        self.request_cold_start_times=[]
        self.request_hit_pre_deploy_times=[]
        self.resource_statistic_start_time=None
        self.resource_statistic_end_time=None
        # System-end flag; used to stop internal microservice state transitions.
        self.system_end=False
        
        # Random generators with fixed seeds so reliability/deadline demands
        # are reproducible across runs.
        # rng_general: reliability, call cost.
        self.rng_general=np.random.default_rng(self.args.random_seed_for_general)
        # rng_resource: ms resource cost (kept independent because the
        # deployment plan correlates with cost).
        self.rng_resource=np.random.default_rng(self.args.random_seed_for_resource)
        self.env = simpy.Environment()
        self.scheduler=Scheduler(self)
        self.request_generator=RequestGenerator()
        self.request_generator.init_for_sim(self, args)
        # Deployment plan derived from historical data in "our method".
        # self.init_deploy_strategy=None
        
        self.pre_deploy_ms_number=0
        
        # Running sum of response times.
        self.response_time_sum=0
        self.load_aim_service_list()
        self.load_history_info()
        self.init_service_reliability_info()
        self.init_microservice_kind_info()
        self.init_bare_metal_node()
        self.init_ms_deployment()
        
        if self.print_level>1:
            print("cloud system init end.")
            
    def load_aim_service_list(self):
        """Populate ``self.aim_service_list`` — either the single service named
        in args, or every service listed in the ``service`` column of the
        aim-service CSV file."""
        if self.args.aim_service_name==None:
            pd_service=pd.read_csv(self.args.aim_service_file)
            self.aim_service_list=pd_service["service"].tolist()
        else:
            self.aim_service_list=[self.args.aim_service_name]
        
        
        
    def load_history_info(self):
        """Load the pickled service->callgraph mapping and keep only the
        entries for the aim services.

        NOTE(review): ``pickle.load`` on the history file — safe only if the
        file is trusted; confirm its provenance.
        """
        if self.print_level>1:
            print("start load historical callgraph info ...")
        # Load the call-graph data.
        with open(self.args.history_base_file, "rb") as file:
            service_to_callgraph_dict_temp=pickle.load(file)
        self.service_to_callgraph_dict={}
        for service_name in self.aim_service_list:
            self.service_to_callgraph_dict[service_name]=service_to_callgraph_dict_temp[service_name]
            

    def init_service_reliability_info(self):
        """Assign each known service a reliability target: either a fixed
        value (``args.spec_reliability`` non-zero) or a uniform random draw
        in [min_reliability, max_reliability]."""
        if self.print_level>1:
            print("start init service reliability ...")
        self.service_to_reliability={}
        for service_name in self.service_to_callgraph_dict.keys():
            if self.args.spec_reliability==0:
                self.service_to_reliability[service_name]=self.rng_general.uniform(self.args.min_reliability, self.args.max_reliability)
            else:
                self.service_to_reliability[service_name]=self.args.spec_reliability
        
    def init_microservice_kind_info(self):
        """Collect all microservice kinds from the call graphs (deduplicated,
        sorted for a deterministic order) and initialize per-kind attributes:
        cold-start time, transient failure rate lambda, resource needs,
        call cost, and idle-time bookkeeping."""
        if self.print_level>1:
            print("start init microservice kind relavent information ...")
        # Collect microservice kinds; order is fixed by the sort below.
        self.ms_kind_list=[]
        ms_response_time_sum={}
        ms_times={}
        ave_list=[]
        for service_name in self.service_to_callgraph_dict.keys():
            self.ms_kind_list.extend(self.service_to_callgraph_dict[service_name].get_ms_kind_list())
            for task_obj in self.service_to_callgraph_dict[service_name].task_id_to_obj.values():
                # assert task_obj.call_num<=task_obj.ave_deal_time
                call_num=task_obj.call_num
                # Clamp the average deal time to at least 1 time unit.
                ave_deal_time=max(task_obj.ave_deal_time,1)
                ave_list.append(ave_deal_time)
                # if call_num>100:
                #     print(ave_deal_time, end="\t")
                if task_obj.ms_kind in ms_response_time_sum.keys():
                    ms_response_time_sum[task_obj.ms_kind]+=ave_deal_time*call_num
                    ms_times[task_obj.ms_kind]+=call_num
                else:
                    ms_response_time_sum[task_obj.ms_kind]=ave_deal_time*call_num
                    ms_times[task_obj.ms_kind]=call_num
        self.ms_kind_list=list(set(self.ms_kind_list))
        self.ms_kind_list.sort()
        if self.print_level>1:
            print(f"ms number: {len(self.ms_kind_list)}")
        
        
        # Initialize microservice transient failure rates and per-unit-time
        # prices (the commented block below was exploratory analysis).
        # ms_ave_deal_time=[]
        # sum_t=0
        # call_num=0
        # for ms_kind in self.ms_kind_list:
        #     ms_ave_deal_time.append(ms_response_time_sum[ms_kind]/ms_times[ms_kind])
        # #     sum_t+=ms_response_time_sum[ms_kind]*ms_times[ms_kind]
        # #     call_num+=ms_times[ms_kind]
        # # print(f"mean:{sum_t/call_num}")
        # print(f"ms_deal_time:ave-{np.array(ms_ave_deal_time).mean()}\tmax-{np.array(ms_ave_deal_time).max()}\tmin-{np.array(ms_ave_deal_time).min()}")
        # exit(-1)
        
        # lambda_list=[]
        for ms_kind in self.ms_kind_list:
            # Cold-start time is a stable (key-derived) random value so the
            # same kind always gets the same cold-start time.    #record
            cold_start_time=stable_random_by_key(ms_kind, self.cold_start_time_min, self.cold_start_time_max)  #record
            self.cold_start_time[ms_kind]=cold_start_time
            # ave_response_time=ms_response_time_sum[ms_kind]/ms_times[ms_kind]
            relia_for_ms=self.rng_general.uniform(0.999, 0.9999)
            
            # lambda derived so that expected unavailability matches the
            # cold-start time at the drawn reliability level.
            lambda_t=1/(cold_start_time/(1-relia_for_ms))
            # print(f"{lambda_t}\t",end="")
            # lambda_list.append(lambda_t)
            self.ms_kind_to_transient_lambda[ms_kind]=lambda_t+self.rng_general.uniform(self.args.min_lambda_transient_ms, self.args.max_lambda_transient_ms)
            
            resource_average=self.rng_resource.uniform(1,4)
            self.ms_kind_to_resource_need[ms_kind]={"cpu":resource_average+self.rng_resource.uniform(0,1), "mem":resource_average+self.rng_resource.uniform(0,1), "disk":resource_average+self.rng_resource.uniform(0,1)}
            self.ms_kind_to_call_cost[ms_kind]=self.rng_general.uniform(self.args.min_ms_kind_cost, self.args.max_ms_kind_cost)
            self.ms_kind_to_number[ms_kind]=0
            self.ms_kind_to_time_add_delete[ms_kind]=[]  #record
            self.idle_time_list_for_ms_kind[ms_kind]=[]  #record
            self.idle_start_time_for_ms_kind[ms_kind]=None
        # print(f"lambda_list:ave-{np.array(lambda_list).mean()}\tmax-{np.array(lambda_list).max()}\tmin-{np.array(lambda_list).min()}")
        # exit(-1)
        # print(f"ms_kind_to_transient_lambda:\n{self.ms_kind_to_transient_lambda}")
        # print(f"ms_kind_to_cost:\n{self.ms_kind_to_cost}")
        
    def init_bare_metal_node(self):
        """Create ``args.bare_metal_node_num`` bare-metal nodes named
        ``Node_0`` .. ``Node_{n-1}``."""
        if self.print_level>1:
            print(f"start init bare metal node (sum:{self.args.bare_metal_node_num}) ...")
        # Initialize the bare-metal servers.
        for i in range(self.args.bare_metal_node_num):
            new_node=BareMetalNode(self, f"Node_{i}")
            self.bare_metal_node_list.append(new_node)
        return
    
    def init_ms_deployment(self):
        """Perform the initial microservice deployment via the scheduler.

        The initial deployment incurs no cold-start time; later additions
        must account for cold start.
        """
        if self.print_level>1:
            print(f"start init microservice deployment (without cold start)...")
        
        self.scheduler.init_deploy()
        return
        
    def run(self):
        """Start the simulation: spawn the request-arrival and info-recording
        processes, then run the simpy environment (optionally bounded by
        ``args.until_time``)."""
        self.env.process(self.request_com())
        self.env.process(self.record_info())
        if self.args.until_time==0:
            self.env.run()
        else:
            self.env.run(until=self.args.until_time)
    
    def record_info(self):
        """simpy process: sample the total microservice count and the average
        remaining resources per node, then re-arm itself every 10 time units
        until ``system_end`` is set."""
        ms_num_sum=0
        resource_rest=0
        for node in self.bare_metal_node_list:
            ms_num_sum+=len(node.msid_to_msobj.keys())
            resource_rest=resource_rest+node.cpu_rest+node.mem_rest+node.disk_rest
        
        # Average over the three resource dimensions and over all nodes.
        resource_rest=resource_rest/3/self.args.bare_metal_node_num
        self.ms_number_list.append([self.env.now, ms_num_sum])
        self.rest_resource_list.append([self.env.now, round(resource_rest,2)])
        
        if not self.system_end:
            yield self.env.timeout(10)
            self.env.process(self.record_info())

    # Handle continuously arriving requests; only the first task(s) of each
    # request are submitted for scheduling here.
    def request_com(self):
        """simpy process: generate requests from the trace, validate them,
        tag pre-warm vs. formal requests, enqueue their start tasks, and
        sleep between arrivals."""
        # Skip part of the trace.
        self.request_generator.jump_request(self.args.jump_request_num)
        # Loop generating requests until the trace is exhausted.
        request_order=0
        while not self.request_generator.is_end():
            # print("start generate!")
            
            # print(f"")
            # Generate a request.
            request = self.request_generator.generate_request(size=self.args.call_graph_size_min)
            # Check whether generation succeeded.
            if request==None:
                if self.print_level>10:
                    print(f"Time:{round(self.env.now)}\t (REQUEST GENE FAIL) \tnone")
                continue
            if request.service_name not in self.service_to_callgraph_dict.keys():
                self.request_generator.generate_failure()
                if self.print_level>10:
                    print(f"Time:{round(self.env.now)}\t (REQUEST GENE FAIL) \tservice name wrong")
                continue
            # elif request.part_percent<=self.args.part_percent_min or request.part_percent>self.args.part_percent_max:
            #     self.request_generator.generate_failure()
            #     if self.print_level>10:
            #         print(f"Time:{round(self.env.now)}\t (REQUEST GENE FAIL) \tpart percent fail")
            #     continue
            # elif request.dynamic_percent<=self.args.dynamic_percent_min or request.dynamic_percent>self.args.dynamic_percent_max:
            #     self.request_generator.generate_failure()
            #     if self.print_level>10:
            #         print(f"Time:{round(self.env.now)}\t (REQUEST GENE FAIL) \tdynamic percent fail")
            #     continue
            # elif request.all_task_num<=self.args.call_graph_size_min or request.all_task_num>self.args.call_graph_size_max:
            #     self.request_generator.generate_failure()
            #     if self.print_level>10:
            #         print(f"Time:{round(self.env.now)}\t (REQUEST GENE FAIL) \tcall graph size fail")
            #     continue
            else:
                # Attach the reliability target; the deadline was already
                # generated when the request itself was created.
                reliability=self.service_to_reliability[request.service_name]
                request.set_reliability(reliability)
                if self.print_level>5:
                    print(f"Time:{round(self.env.now)}\t (REQUEST COME EVENT) \trequest:****{request.request_id}**** come!")
            self.request_info_list.append(f"({request.request_id},R:{request.reliability},S:{request.service_name},T:{request.trace_id},TN:{request.all_task_num},DP:{round(request.dynamic_percent,2)},PP:{round(request.part_percent,2)})" )
            
            # The first pre_request_num requests are warm-up; only later
            # requests are "formal" and counted in the statistics.
            if request_order>=self.args.pre_request_num:
                request.formal_flage=True
            if request_order==self.args.pre_request_num:
                self.resource_statistic_start_time=self.env.now
            for task_t in request.start_task_list:
                task_t.part_id=0
                self.add_one_task(task_t)
            
            request_com_interval = self.request_generator.get_next_interval()
            request_order+=1
            if request_order==self.args.request_num+self.args.pre_request_num:
                self.resource_statistic_end_time=self.env.now
                
            # Inter-arrival time, scaled by the time-expansion factor.
            yield self.env.timeout(request_com_interval*self.args.time_expand)   #request_com_interval*
            # gc.collect()
        # print("generate end!")
        # self.request_generator_end=True
        # Record the maximum resource-statistic time.
        # self.resource_statistic_end_time=self.env.now
        return
    
    def add_one_task(self,task_ms_pack):
        """Assign a part id to the task, append it to the pending queue and
        trigger a scheduling round."""
        self.set_part_id(task_ms_pack)
        self.pending_task_queue.append(task_ms_pack)
        self.scheduler.do_schedule_deploy()
        
    def set_part_id(self, task):
        """Derive the task's partition id from its parent: root tasks get 0;
        ``mq``-type RPC children start a new partition (parent id + 1),
        other children inherit the parent's partition id."""
        if task.part_id==None:
            if task.parent_task==None:
                task.part_id=0
            else:
                if task.rpc_type=="mq":
                    task.part_id=task.parent_task.part_id+1
                else:
                    task.part_id=task.parent_task.part_id
    
    def end_one_request(self,request):
        """Finalize a returned (or rejected) request and update statistics.

        Only ``formal_flage`` requests are counted. A finished request whose
        tasks are all done is deleted; otherwise it is parked in
        ``not_finished_request_list`` for later processing.
        """
        
        if request.reject_flage==True:
            suc_flage=False
            if request.formal_flage==True:
                self.reject_num+=1
                    
            if self.print_level>5:
                print(f"Time:{round(self.env.now)}\t (REJECT REQUEST EVENT) \t{request.request_id} --{suc_flage}")
        else:
            
            suc_flage=True
            if request.formal_flage==True:
                self.back_all_num+=1
                self.response_time_sum+=request.response_time
                self.response_time_list.append(request.response_time)
                
                if request.response_time<request.deadline:
                    self.deadline_suc_request_num+=1
                
                # Record the response time per service.
                if request.service_name in self.service_to_response_time.keys():
                    self.service_to_response_time[request.service_name].append(request.response_time)
                else:
                    self.service_to_response_time[request.service_name]=[request.response_time]
                
            if request.without_failuer_flage:
                if request.formal_flage==True:
                    self.back_suc_request_num+=1
            else:
                suc_flage=False
                if request.formal_flage==True:
                    self.back_fail_request_num+=1
                    self.violation_of_reliability_request_num+=1
                    
            if self.print_level>5:
                print(f"Time:{round(self.env.now)}\t (END  REQUEST EVENT) \t{request.request_id} --{suc_flage}")
            
            if request.is_all_task_done():
                if request.formal_flage==True:
                    task_num, replica_num=request.get_task_num_and_replica_num()
                    self.request_cold_start_times.append(request.cold_start_times)
                    self.request_hit_pre_deploy_times.append(request.hit_pre_deploy_times)
                    self.all_task_num+=task_num
                    self.all_task_replica_num+=replica_num
                    self.spec_req_spec_time[request.request_id]=[request.sum_cold_start_time, request.sum_wait_time, request.sum_deal_time, request.sum_transform_time]
                del request
            else:
                self.not_finished_request_list.append(request)
        
                    

    def deal_pre_unfinished_request(self):
        """Sweep ``not_finished_request_list``: account for requests whose
        tasks have since completed, keep the rest. Returns True when the
        list is fully drained."""
        unfinished_request=[]
        # Process the previously-recorded unfinished requests for statistics.
        for request in self.not_finished_request_list:
            if request.is_all_task_done():
                if request.formal_flage==True:
                    task_num, replica_num=request.get_task_num_and_replica_num()
                    self.request_cold_start_times.append(request.cold_start_times)
                    self.request_hit_pre_deploy_times.append(request.hit_pre_deploy_times)
                    self.all_task_num+=task_num
                    self.all_task_replica_num+=replica_num
                    self.spec_req_spec_time[request.request_id]=[request.sum_cold_start_time, request.sum_wait_time, request.sum_deal_time, request.sum_transform_time]
                del request
            else:
                unfinished_request.append(request)
        self.not_finished_request_list=unfinished_request
        if len(self.not_finished_request_list)==0:
            return True
        else:
            return False
        
    def judge_system_end(self):
        """Decide whether the simulation is over: no more requests, empty
        pending queue, all nodes idle, and all unfinished requests drained.
        Sets ``system_end`` and returns True when all conditions hold."""
        # if self.system_end==True:
        #     print(f"wrong para:{self.args.deploy_schedule_strategy},{self.args.part_percent_min}")
        assert self.system_end==False
        # Still have future requests or pending tasks?
        if not self.request_generator.is_end() or len(self.pending_task_queue)>0:
            # print("not self.request_generator.is_end() or len(self.pending_task_queue)>0")
            return False
        # Every node must be idle.
        for node in self.bare_metal_node_list:
            if not node.judge_node_idle():
                # print("not node.judge_node_idle()")
                return False
        # The remainder of every request must have been processed.
        end_flage=self.deal_pre_unfinished_request()
        if end_flage==False:
            return False
        # Reaching here means no requests and no waiting/running tasks remain,
        # so the simulation can stop.
        self.system_end=True
        return True
        
        
       
    def end_event(self):
        """simpy process run at system end: snapshot cost/usage totals and
        the end time, then remove all microservices from every node."""
        yield self.env.timeout(0)
        assert self.system_end==True
        self.all_cost=self.cal_system_resource_cost()
        self.all_resource_usage=self.cal_system_resource_usage()
        self.simulation_end_time=self.env.now
        # Deregister the microservices.
        for node in self.bare_metal_node_list:
            node.rm_all_ms()
    
    # def remove_all_ms(self):
        
    
        
    
    def get_TT_time(self, this_task, this_task_ms_obj, parent_task_ms_obj, bef_flage=True, base_flage=False):
        """Transmission time between a task and its parent.

        Returns 0 when there is no parent / no ms object, or when both run on
        the same node. Otherwise: a fixed 5.5 in base mode, else the task's
        before/after data volume divided by the average bandwidth.
        """
        if this_task.parent_task == None or parent_task_ms_obj == None or this_task_ms_obj==None:  
            return 0
        elif parent_task_ms_obj.node_obj.node_id == this_task_ms_obj.node_obj.node_id:
            return 0
        else:
            if base_flage==True:
                return 5.5
            else:
                if bef_flage==True:
                    return this_task.tt_datas_bef/self.args.ave_bandwidth
                else:
                    return this_task.tt_datas_aft/self.args.ave_bandwidth
        
    def get_TT_time_cross_node(self, task):
        """Cross-node transmission time for *task* (before-data volume over
        average bandwidth)."""
        return task.tt_datas_bef/self.args.ave_bandwidth
        
        
    # Number of idle microservices of the given kind across all nodes.
    def get_idle_num_with_ms_kind(self, ms_kind):
        ms_specific_kind_num=0
        for node in self.bare_metal_node_list:
            ms_specific_kind_num+=node.get_idle_num_with_ms_kind(ms_kind)
        return ms_specific_kind_num
    
    # Available microservice objects of the given kind across all nodes.
    def get_available_ms_obj_with_ms_kind(self, ms_kind):
        ms_obj_list=[]
        for node in self.bare_metal_node_list:
            ms_obj_list.extend(node.get_available_ms_obj_with_ms_kind(ms_kind))
        return ms_obj_list

    def cal_system_resource_cost(self):
        """Total accumulated resource cost over all nodes."""
        acc_cost=0
        for node in self.bare_metal_node_list:
            acc_cost+=node.get_acc_cost()
        return acc_cost
    
    def cal_system_resource_usage(self):
        """Mean of the accumulated cpu/mem/disk usage over all nodes."""
        acc_cpu_usage=0
        acc_mem_usage=0
        acc_disk_usage=0
        for node in self.bare_metal_node_list:
            cpu, mem, disk=node.get_acc_resource_usage()
            acc_cpu_usage+=cpu
            acc_mem_usage+=mem
            acc_disk_usage+=disk
        return (acc_cpu_usage+acc_mem_usage+acc_disk_usage)/3
    
    def get_cost(self):
        """Total cost snapshot taken in ``end_event``."""
        return self.all_cost
    
    def get_resource_usage(self):
        """Total resource-usage snapshot taken in ``end_event``."""
        return self.all_resource_usage
    
    def get_reliability_deadline(self):
        """Fraction of returned requests that met their deadline (0 when no
        request has returned)."""
        return self.deadline_suc_request_num/self.back_all_num if self.back_all_num>0 else 0
    
    def get_cold_start_time(self, ms_kind):
        """Cold-start time assigned to *ms_kind* at initialization."""
        return self.cold_start_time[ms_kind]
        
        
    # Current list of microservice ids across all nodes.
    def get_ms_id_list_in_system(self):
        ms_id_list=[]
        for node in self.bare_metal_node_list:
            ms_id_list.extend(node.msid_to_msobj.keys())
        return ms_id_list
    
    
    
    def get_mean_ratio_of_deadline_violation_ms_idle(self, ms_kind):
        """Mean deadline-violation ratio and mean idle-time ratio over all
        microservice instances of *ms_kind*.

        NOTE(review): if no node hosts this kind, ``ms_num`` stays 0 and the
        final division raises ZeroDivisionError — confirm callers guarantee
        at least one instance exists.
        """
        ms_num=0
        sum_ratio_of_deadline_violation=0
        sum_ratio_of_idle_time=0
        for node_t in self.bare_metal_node_list:
            if ms_kind not in node_t.msk_to_msidlist.keys():
                continue
            for ms_id in node_t.msk_to_msidlist[ms_kind]:
                ms_obj=node_t.msid_to_msobj[ms_id]
                deadline_violation=ms_obj.get_deadline_violation()
                idle_time=ms_obj.get_idle_time()
                
                sum_ratio_of_deadline_violation+=deadline_violation
                sum_ratio_of_idle_time+=idle_time
                ms_num+=1
        return sum_ratio_of_deadline_violation/ms_num, sum_ratio_of_idle_time/ms_num
    
    
    def record_ms_end_relavent_time(self, ms_obj):
        """Bookkeeping when a microservice instance is removed: decrement the
        per-kind count, mark the start of a kind-wide idle period when the
        count hits 0, and record the instance's lifetime samples."""
        self.ms_kind_to_number[ms_obj.ms_kind]-=1
        if self.ms_kind_to_number[ms_obj.ms_kind]==0:
            self.idle_start_time_for_ms_kind[ms_obj.ms_kind]=self.env.now
        if ms_obj.acc_exist_time>0:
            self.spec_ms_spec_time[ms_obj.ms_id]=[ms_obj.acc_exist_time, ms_obj.acc_working_time, ms_obj.acc_idle_time, ms_obj.cold_start_time]
        # Record the microservice lifetime as (+1 at start, -1 at end) events.
        self.ms_kind_to_time_add_delete[ms_obj.ms_kind].append([ms_obj.start_time, 1])
        self.ms_kind_to_time_add_delete[ms_obj.ms_kind].append([ms_obj.end_time, -1])
        return
    
    
    
    def record_ms_start_relavent_time(self, ms_obj):
        """Bookkeeping when a microservice instance is created: close any
        open kind-wide idle period and increment the per-kind count."""
        # Check whether a period with no instance of this kind just ended
        # (the initial creation is excluded via the None sentinel).
        if self.ms_kind_to_number[ms_obj.ms_kind]==0 and self.idle_start_time_for_ms_kind[ms_obj.ms_kind]!=None:
            idle_time=self.env.now-self.idle_start_time_for_ms_kind[ms_obj.ms_kind]
            self.idle_time_list_for_ms_kind[ms_obj.ms_kind].append(idle_time)
        self.ms_kind_to_number[ms_obj.ms_kind]+=1
        
        return
        
    def system_index_statistic(self):
        """Compute, print and (optionally) persist the end-of-run statistics.

        Writes a text summary and a pickle of raw time series when
        ``args.outfile_flage`` is set. Returns
        ``(ave_replica_num, ave_response_time)``.
        """
        request_come_num=self.request_generator.request_come_num-self.args.pre_request_num
        if self.print_level>0:
            print(f"\n&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&       simulation result (method:{self.args.deploy_schedule_strategy})    &&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&\n")
            print(f"提交任务数量:{request_come_num}\t\t返回任务数量:{self.back_all_num}\t\t拒绝任务数量:{self.reject_num}")
            print(f"成功任务数量:{self.back_suc_request_num}\t\t失败任务数量:{self.back_fail_request_num}")
        succeed_rate=self.back_suc_request_num/request_come_num if request_come_num!=0 else 0
        succeed_rate_by_back=self.back_suc_request_num/self.back_all_num if self.back_all_num!=0 else 0
        resource_usage=self.get_resource_usage()
        reliability_deadline=self.get_reliability_deadline()
        cost=self.get_cost()
        
        ave_response_time=self.response_time_sum/self.back_all_num if self.back_all_num>0 else 0
        # NOTE(review): the guard checks back_all_num but the division is by
        # all_task_num — possible ZeroDivisionError when requests returned but
        # no task was counted; confirm this cannot happen in practice.
        ave_replica_num=round(self.all_task_replica_num/self.all_task_num,8) if self.back_all_num>0 else 0
        real_run_time=time.time()-self.args.start_time
        
        
        if self.print_level>0:
            print(f"资源使用量:{resource_usage}\t\tdeadline可靠性:{reliability_deadline}\n")
            print(f"平均副本数:{ave_replica_num}\n")
            print(f"成功率(总):{round(succeed_rate*100,2)}%\t\t成功率(返回):{round(succeed_rate_by_back*100,2)}%")
            print(f"成本消耗(cost):{cost}\n平均时延(delay):{ave_response_time}")
            print(f"仿真时间(simulation time):{self.simulation_end_time}\n")
            print(f"程序运行时间(real time):{round(real_run_time,2)}秒")
            print(f"第一个请求到达时间：{self.resource_statistic_start_time}\n")
            print(f"最后一个请求到达时间：{self.resource_statistic_end_time}\n")
            
        if self.args.outfile_flage:
            temp_string=f"deploy_schedule_strategy:{self.args.deploy_schedule_strategy}\n"
            temp_string+=f"resource_usage:{resource_usage}\nreliability_deadline:{reliability_deadline}\n"
            temp_string+=f"cost sum:{cost}\nave response time:{ave_response_time}\n"
            temp_string+=f"ave replica num:{ave_replica_num}\nreal run time:{real_run_time}\nsimulation time:{self.simulation_end_time}\n"
            temp_string+=f"request_come_num:{request_come_num}\nrequest_back_num:{self.back_all_num}\n"
            
            temp_string+=f"succeed request num:{self.back_suc_request_num}\nfailure request num:{self.back_fail_request_num}\n"
            temp_string+=f"succeed request num(deadline):{self.deadline_suc_request_num}\nfailure request num(deadline):{self.back_all_num-self.deadline_suc_request_num}\nreject num:{self.reject_num}\n"
            temp_string+=f"sum succeed ratio(%):{round(succeed_rate*100,2)}\n"
            temp_string+=f"first request arrive time:{self.resource_statistic_start_time}\n"
            temp_string+=f"last request arrive time:{self.resource_statistic_end_time}\n"
            temp_string+=f"pre_deploy_ms_number:{self.pre_deploy_ms_number}\n"
            # System settings.
            temp_string+=f"\nheuristic_replica:{self.args.heuristic_replica}\nheuristic_deploy:{self.args.heuristic_deploy}\n"
            temp_string+=f"high_frequency_N:{self.args.high_frequency_N}\njump_request_num:{self.args.jump_request_num}\n"
            temp_string+=f"elastic_deploy:{self.args.elastic_deploy}\n"
            temp_string+=f"priority_queue:{self.args.priority_queue}\nstate_resource_percent:{self.args.state_resource_percent}\n"
            temp_string+=f"bare_metal_node_num:{self.args.bare_metal_node_num}\n"
            temp_string+=f"request_num:{self.args.request_num}\ntrace_id:{self.args.trace_id}\n"
            temp_string+=f"time_expand:{self.args.time_expand}\n"
            temp_string+=f"dynamic_percent_min:{self.args.dynamic_percent_min}\ndynamic_percent_max:{self.args.dynamic_percent_max}\n"
            temp_string+=f"part_percent_min:{self.args.part_percent_min}\npart_percent_max:{self.args.part_percent_max}\n"
            temp_string+=f"random_seed_for_general:{self.args.random_seed_for_general}\nrandom_seed_for_resource:{self.args.random_seed_for_resource}\n"
            temp_string+=f"deadline_factor:{self.args.deadline_factor}\n"
            temp_string+=f"spec_reliability:{self.args.spec_reliability}\n"
            temp_string+=f"min_reliability:{self.args.min_reliability}\n"
            temp_string+=f"max_reliability:{self.args.max_reliability}\n"
            temp_string+=f"min_lambda_transient_ms:{self.args.min_lambda_transient_ms}\n"
            temp_string+=f"max_lambda_transient_ms:{self.args.max_lambda_transient_ms}\n"
            temp_string+=f"min_ms_kind_cost:{self.args.min_ms_kind_cost}\n"
            temp_string+=f"max_ms_kind_cost:{self.args.max_ms_kind_cost}\n"
            temp_string+=f"ave_bandwidth(MB):{self.args.ave_bandwidth*1000/1024/1024}\n"
            temp_string+=f"until_time:{self.args.until_time}\nprint_level:{self.args.print_level}\n"
            # temp_string+=f"GSMS_init_deploy_flage:{self.args.GSMS_init_deploy_flage}\n"
           
            temp_string+=f"validate_trace_file:{self.args.validate_trace_file}\n"
            temp_string+=f"history_base_file:{self.args.history_base_file}\n"
            # Reduce self.service_to_response_time to a per-service mean.
            service_to_response_time={}
            for service_name in self.service_to_response_time.keys():
                mean_rt=round(float(np.array(self.service_to_response_time[service_name]).mean()),2)
                service_to_response_time[service_name]=mean_rt
            temp_string+=f"service_to_response_time:{repr(service_to_response_time)}\n"
            
            temp_string+=f"ms_number_list:{self.ms_number_list}\nrest_resource_list:{self.rest_resource_list}\n"
            if self.args.request_num<10000:
                temp_string+=f"request_info:{self.request_info_list}\n"
                
            out_full_path=ensure_directory_exists(get_cur_dir()+"/"+self.args.out_dir)
            with open(out_full_path+"/"+self.args.outfile_name, "w") as file:
                file.write(temp_string)
            
            
            # Drop kinds that never recorded an idle period (iterating the
            # list, mutating the dict — safe).
            for ms_kind in self.ms_kind_list:
                if len(self.idle_time_list_for_ms_kind[ms_kind])==0:
                    del self.idle_time_list_for_ms_kind[ms_kind]
            
            with open(out_full_path+"/"+self.args.outvar_file_name, "wb") as file:
                storage_data={
                    "cold_start_time":self.cold_start_time,
                    "spec_ms_spec_time":self.spec_ms_spec_time,                      # times per ms_id (idle, working, etc.)
                    "idle_time_list_for_ms_kind":self.idle_time_list_for_ms_kind,    # idle periods per microservice kind
                    "ms_kind_to_time_add_delete":self.ms_kind_to_time_add_delete,    # add/remove events per microservice kind
                    "response_time":self.response_time_list,                         # list of response times
                    "request_cold_start_times":self.request_cold_start_times,        # cold starts experienced per request
                    "request_hit_pre_deploy_times":self.request_hit_pre_deploy_times,# pre-deploy hits per request
                    "spec_req_spec_time":self.spec_req_spec_time                     # times per request
                }
                pickle.dump(storage_data, file)
            
            if self.print_level>20:
                print(f"idle_time_list_for_ms_kind:{self.idle_time_list_for_ms_kind}\n")
                print(f"spec_ms_spec_time:{self.spec_ms_spec_time}\n")
                
        return ave_replica_num, ave_response_time
    
    

def _default_args():
    """Build the default simulation configuration (used when run_once gets no args).

    Every value here mirrors the original inline defaults; tune experiments by
    passing a pre-built Args to run_once instead of editing this function.
    """
    args = Args()

    # Strategy options: ours, GSMS, RR, R_RIR, QFEC, C_GM
    args.deploy_schedule_strategy = "GSMS"
    args.priority_queue = False
    args.state_resource_percent = 0.3
    args.aim_service_name = None
    args.bare_metal_node_num = 100
    args.high_frequency_N = 0
    args.heuristic_replica = "up_deal_time"
    args.heuristic_deploy = "time_cover"
    args.jump_request_num = 0
    args.elastic_deploy = True
    args.deploy_para = 1.0
    args.pre_deploy = True
    args.dynamic_replica = True
    # Workload settings
    args.request_num = 1000
    args.pre_request_num = 10
    args.trace_id = 0
    args.time_expand = 0.1
    args.no_cut_time_in_queue = 1000
    ####################################
    # Dynamic-percent range (upper bound inclusive, lower bound exclusive)
    args.dynamic_percent_min = 0
    args.dynamic_percent_max = 1
    # Partition-percent range (upper bound inclusive, lower bound exclusive)
    args.part_percent_min = 0
    args.part_percent_max = 1
    # Call-graph size range (upper bound inclusive, lower bound exclusive)
    args.call_graph_size_min = 0
    args.call_graph_size_max = 99999
    ########################################
    # Random seeds
    args.random_seed_for_general = 10
    args.random_seed_for_resource = 10

    # Deadline requirement
    args.deadline_factor = 1
    # Reliability requirements
    args.spec_reliability = 0.9999
    args.min_reliability = 0.95
    args.max_reliability = 0.9999
    # Poisson-process parameters (per-millisecond failure rates)
    args.min_lambda_transient_ms = 0.0001 / 3600000  # 1/30/3600000+
    args.max_lambda_transient_ms = 0.001 / 3600000   # 1/3600000+
    # Microservice invocation cost bounds
    args.min_ms_kind_cost = 0.06
    args.max_ms_kind_cost = 0.9
    # Bandwidth setting
    args.ave_bandwidth = 20 * 1024 * 1024 / 1000  # 20MBps
    # Simulation horizon (0 = run to completion)
    args.until_time = 0
    # Output settings
    args.print_level = 21
    args.outfile_flage = True

    # args.GSMS_init_deploy_flage=False
    # args.ours_adjust_ms_flage=False

    # args.history_trace_file=get_root_dir()+"/data_process/dealed_data/CallGraph_dataset_0d11_0d12_cleaned_0.5_s66_history_05-29-11-41-10.csv"
    # args.validate_trace_file=get_root_dir()+"/data_process/dealed_data/CallGraph_dataset_0d11_0d12_cleaned_0.5_s77_validate_05-29-11-41-10.csv"
    args.out_dir = "output"
    args.aim_service_file = get_root_dir() + "/data_process/dealed_data/statistic_pkl_sift_dataset_0d11_0d12.csv"

    args.history_base_file = get_root_dir() + "/data_process/dealed_data/dataset_0d11_0d12.base"
    return args


def _resolve_dataset_files(args):
    """Derive validate-trace and plan-file paths from the filter settings on args.

    Exactly one of three filter modes must hold (no filter / dynamic-percent
    window / call-graph-size window); otherwise the configuration is rejected.

    Raises:
        SystemExit: when the filter ranges match none of the supported modes.
    """
    if args.high_frequency_N > 0:
        spec = f"top{args.high_frequency_N}_"
    else:
        spec = ""

    dyn_span = round(args.dynamic_percent_max - args.dynamic_percent_min, 1)
    size_span = args.call_graph_size_max - args.call_graph_size_min

    if dyn_span > 0.2 and size_span > 10:  # wide ranges mean "not used as a filter"
        args.validate_trace_file = get_dataset_dir() + f"/CallGraph_dataset_0d12_0d13_{spec}cleaned_0.1_s{args.trace_id}_validate.csv"
        args.our_method_plan_file = get_root_dir() + f"/data_process/dealed_data/heuristic_plan_main_para{round(args.deploy_para,1)}.csv"
    elif dyn_span == 0.2:
        args.validate_trace_file = get_dataset_dir() + f"/CallGraph_dataset_0d12_0d13_dynamic{round(args.dynamic_percent_min,1)}_{round(args.dynamic_percent_max,1)}_validate.csv"
        args.our_method_plan_file = get_root_dir() + f"/data_process/dealed_data/heuristic_plan_dynamic{round(args.dynamic_percent_min,1)}_{round(args.dynamic_percent_max,1)}_.csv"
    elif size_span == 10:
        args.validate_trace_file = get_dataset_dir() + f"/CallGraph_dataset_0d12_0d13_size{round(args.call_graph_size_min)}_{round(args.call_graph_size_max)}_validate.csv"
        args.our_method_plan_file = get_root_dir() + f"/data_process/dealed_data/heuristic_plan_size{round(args.call_graph_size_min)}_{round(args.call_graph_size_max)}_.csv"
    else:
        print(f"args wrong ({round(args.dynamic_percent_max,1)}, {round(args.dynamic_percent_min,1)}, {args.call_graph_size_max}, {args.call_graph_size_min}: {round(args.dynamic_percent_max,1)-round(args.dynamic_percent_min,1)}) ")
        # raise SystemExit instead of site's exit(): identical effect, works even
        # when the site module helpers are unavailable (e.g. under -S)
        raise SystemExit(-1)


def _set_output_names(args):
    """Compose timestamped output file names encoding the key run parameters."""
    now_time = datetime.datetime.now()
    formatted_time = now_time.strftime('%m_%d_%H_%M_%S')
    version = f"v3.6.0_{args.deploy_schedule_strategy}_node{args.bare_metal_node_num}_trace{args.trace_id}_request{args.request_num}_timeExp{args.time_expand}_specR{args.spec_reliability}_para{args.deploy_para}_dynamic{round(args.dynamic_percent_min,2)}-{round(args.dynamic_percent_max,2)}_scale{args.call_graph_size_min}-{args.call_graph_size_max}_resoP{args.state_resource_percent}_heurR{args.heuristic_replica}_heurD{args.heuristic_deploy}_preDeF{args.pre_deploy}_dyReF{args.dynamic_replica}"
    # part{round(args.part_percent_min,2)}-{round(args.part_percent_max,2)}_sname{args.aim_service_name}_elastic{args.elastic_deploy}

    args.outfile_name = "Run_statistic_" + version + "_" + formatted_time + ".txt"
    args.outvar_file_name = "Run_statistic_" + version + "_" + formatted_time + ".pkl"


def run_once(args=None):
    """Run one full simulation and return its summary statistics.

    Args:
        args: optional pre-built Args configuration; when None, the default
            configuration from _default_args() is used.

    Returns:
        The pair produced by CloudSystem.system_index_statistic()
        (average replica number, average response time).

    Raises:
        SystemExit: when the filter ranges on args are inconsistent.
    """
    if args is None:
        args = _default_args()

    # Resolve the validation dataset / plan file paths for this configuration.
    _resolve_dataset_files(args)

    np.random.seed(args.random_seed_for_general)
    random.seed(args.random_seed_for_general)

    if args.outfile_flage:
        _set_output_names(args)

    # Run the simulation.
    cloud_system = CloudSystem(args)
    args.start_time = time.time()
    cloud_system.run()
    # NOTE(review): system_index_statistic returns (ave_replica_num,
    # ave_response_time); the original unpacked them as (succeed_rate, cost),
    # which was misleading — renamed to match the producer.
    ave_replica_num, ave_response_time = cloud_system.system_index_statistic()
    return ave_replica_num, ave_response_time
    
if __name__=="__main__":
    run_once()
    


