import numpy as np
from util import *
import copy
import pickle
import pymetis
from pymetis import Options 
import scipy.sparse as sp
from itertools import cycle
import random
# import hashlib
# import torch
import json
import os
import math
from Method.GA_for_ours_deploy import GA_algorithm
from Method.Heuristic_init_deploy import Heuristic_algorithm
from collections import Counter
# At the algorithm level, the dealing time of a task about to be scheduled and its sub-task count are not known in advance.


class OurMethod:
    def __init__(self,cloud_system):
        self.delay_remove_time=0
        self.cloud_system=cloud_system
        self.env=cloud_system.env
        self.print_level=cloud_system.print_level
        self.state_resource_percent=self.cloud_system.args.state_resource_percent
        # self.neural_network=None
        
        # self.ms_migration=cloud_system.args.our_method_migration
        ####################################
        #call info
        self.ms_call_matrix=None
        ####################################
        #id
        self.ms_id_list_exist=None
        #kind
        self.ms_kind_list_exist=None
        #cluster
        self.ms_node_index_exist=None
        #记录微服务类型的数量
        self.ms_kind_to_number=None
        ####################################
        self.network_deploy_result=False
        
        self.buffer_ms_num=2
        #记录结束工作的时间
        self.ms_obj_to_end_time={}
        self.remove_process=None
        return
        
    def init_deploy(self):
        """Run the active initial-placement strategy (one instance of each kind).

        Alternative strategies (Heuristic / GA) exist on this class but are
        not wired in here.
        """
        self.init_deploy_with_each_once()
    
    def init_deploy_with_each_once(self):
        """Deploy one instance of every microservice kind, grouped by a METIS k-way cut.

        The community-division result is cached on disk next to the service
        file so repeated runs skip the (expensive) graph partitioning.
        Fix: removed the unreachable duplicate ``return`` that followed the
        first one.
        """
        cache_path = (self.cloud_system.args.aim_service_file
                      + f"_{self.cloud_system.args.bare_metal_node_num}.init_each_once")
        if os.path.exists(cache_path):
            with open(cache_path, "rb") as file:
                storage_data = pickle.load(file)
                self.init_deploy_according_to_community_division(
                    storage_data["ms_kind_list"], storage_data["node_index_list"])
        else:
            # One planned instance per kind, partitioned across the nodes.
            adj_matrix, ms_kind_list = self.get_adj_matrix(1)
            node_index_list, _cut = self.k_way_cut(adj_matrix, self.cloud_system.args.bare_metal_node_num)
            self.init_deploy_according_to_community_division(ms_kind_list, node_index_list)

            with open(cache_path, "wb") as file:
                pickle.dump({
                    "ms_kind_list": ms_kind_list,
                    "node_index_list": node_index_list,
                }, file)
    def get_adj_matrix(self, each_ms_kind_deploy_number):
        #基于类型，统计邻接矩阵
        ms_kind_list=self.cloud_system.ms_kind_list
        ms_kind_adj=np.zeros((len(ms_kind_list), len(ms_kind_list)))
        for service_name in self.cloud_system.service_to_callgraph_dict.keys():
            request_t=self.cloud_system.service_to_callgraph_dict[service_name]
            for task_t in request_t.start_task_list:
                self.get_adj_matrix_update_ms_kind_adj(task_t, ms_kind_adj,ms_kind_list)
        #基于类型统计的邻接矩阵，计算计划生成的微服务call 邻接矩阵
        ms_list=[]
        for ms_kind in ms_kind_list:
            for order in range(each_ms_kind_deploy_number):
                # ms_id_t=ms_kind+f"||{order}"
                ms_list.append(ms_kind)
        ms_adj=np.zeros((len(ms_list), len(ms_list)))
        for i in range(len(ms_list)):
            for j in range(0, i):
                pre_index_i=math.floor(i/each_ms_kind_deploy_number)
                pre_index_j=math.floor(j/each_ms_kind_deploy_number)
                call_num=ms_kind_adj[pre_index_i][pre_index_j]
                ms_adj[i][j]=call_num
                ms_adj[j][i]=call_num
                
        return ms_adj, ms_list
                
     
    
    def k_way_cut(self,adj_matrix, partition_num, imbalance=1.2):
        """Partition the weighted adjacency matrix into *partition_num* parts with METIS.

        Args:
            adj_matrix: dense (or sparse-convertible) call-count matrix.
            partition_num: number of parts (k).
            imbalance: allowed load imbalance; METIS caps each part at
                ideal_size * (1 + ufactor/1000), where ideal = n_vertices / k.

        Returns:
            (parts, objval): per-vertex part indexes as an ndarray, and the
            achieved edge-cut objective value.
        """
        # ideal = total vertex count / k
        # upper bound = ideal * (1 + ufactor / 1000)
        adj=sp.csr_matrix(adj_matrix)
        # (1) CSR form, symmetrized — METIS expects an undirected graph
        csr = adj.tocsr()
        if (csr - csr.T).nnz:
            csr = csr.maximum(csr.T)

        xadj     = csr.indptr.astype(np.int32)
        adjncy   = csr.indices.astype(np.int32)
        eweights = csr.data.astype(np.int32)
        # (2) load imbalance: ufactor = (imbalance - 1) * 1000
        ufactor  = int(round((imbalance - 1) * 1000))   # 1.05 -> 50

        # (3) build Options — pymetis only accepts the Options class, not a dict
        opts = Options(ufactor=ufactor, numbering=0)    # numbering=0 means C-style 0..n-1

        # (4) call part_graph (no ubvec / adjwgt)
        objval, parts = pymetis.part_graph(
            partition_num,
            xadj=xadj,
            adjncy=adjncy,
            eweights=eweights,
            options=opts,
        )
        return np.asarray(parts), objval
    
    def init_deploy_according_to_community_division(self, ms_kind_list, node_index_list):
        #这里输入的ms_kind_list和cloud system 中的不一样，这里的可以重复，而cloud system中的不可以重复
        self.ms_id_list_exist=[]
        self.ms_kind_list_exist=[]
        self.ms_cluster_index_exist=[]
        self.ms_num_for_ms_kind_exist=[0]*len(self.cloud_system.ms_kind_list)
        self.ms_num_for_ms_kind_aim=[0]*len(self.cloud_system.ms_kind_list)
        for index in range(len(ms_kind_list)):
            ms_kind=ms_kind_list[index]
            # ms_kind=ms_str.split("||")[0]
            node_index=node_index_list[index]
            result, ms_obj= self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind)
            if result==False:
                print("The initial number of microservices was too large, resulting in the\
                    inability of a single node to accommodate the number of microservices after community division.")
                exit(-1)
            ms_obj.stable_flage_for_ours=True
            self.ms_id_list_exist.append(ms_obj.ms_id)
            self.ms_kind_list_exist.append(ms_kind)
            self.ms_cluster_index_exist.append(node_index)
            index_new=self.cloud_system.ms_kind_list.index(ms_kind)
            self.ms_num_for_ms_kind_exist[index_new]+=1
        
        self.ms_call_matrix=np.zeros((len(self.ms_id_list_exist), len(self.ms_id_list_exist)))
        
    def init_deploy_with_Heuristic(self):
        """Initial placement via the heuristic algorithm, with on-disk caching.

        Loads a cached strategy when one exists for the current node count /
        resource percent / top-N settings; otherwise runs
        ``Heuristic_algorithm`` over the directed call-weight matrix and
        persists the result (including the resource-cost table used).
        """
        if self.cloud_system.args.high_frequency_N>0:
            spec=f"_top{self.cloud_system.args.high_frequency_N}"
        else:
            spec=""
        init_deploy_strategy_file_path=self.cloud_system.args.history_base_file+f"_{self.cloud_system.args.bare_metal_node_num}_{self.state_resource_percent}{spec}.Heur_init_deploy"
        if os.path.exists(init_deploy_strategy_file_path):
            with open(init_deploy_strategy_file_path, "rb") as file:
                storage_data=pickle.load(file)
                spec_ms_kind_list=storage_data["ms_kind_list"]
                spec_node_index_list=storage_data["node_index_list"]
                self.cloud_system.ms_kind_to_resource_need=storage_data["resource_cost"]
                self.execute_init_deploy(spec_ms_kind_list, spec_node_index_list)
        else:
            if self.cloud_system.print_level>1:
                print("get init deploy strategy...")
            system_ms_kind_list=self.cloud_system.ms_kind_list
            weight_matrix_t = self.get_weight_matrix(system_ms_kind_list,direct_flage=True)
            cpu_cost_t = []
            mem_cost_t = []
            disk_cost_t = []
            for ms_kind in system_ms_kind_list:
                cpu_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["cpu"])
                mem_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["mem"])
                disk_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["disk"])
            cpu_cost_t = np.array(cpu_cost_t)
            mem_cost_t = np.array(mem_cost_t)
            disk_cost_t = np.array(disk_cost_t)
            ms_ratio_t = np.sum(weight_matrix_t, axis=1) / np.sum(weight_matrix_t)
            node_num_t = self.cloud_system.args.bare_metal_node_num
            print("start Heuristic...")
            spec_ms_kind_list_t, spec_node_index_list=Heuristic_algorithm(weight_matrix_t, cpu_cost_t, mem_cost_t, disk_cost_t, ms_ratio_t, node_num_t, self.state_resource_percent)
            print(f"微服务总数：{len(spec_ms_kind_list_t)},\n每类数量：{Counter(spec_ms_kind_list_t)}\n微服务详细信息：{spec_ms_kind_list_t}")
            print(f"各个节点上微服务数量：{Counter(spec_node_index_list)}\n对应node ID{spec_node_index_list}")
            
            spec_ms_kind_list=[]
            # The heuristic returns kind indexes; convert them back to kind names.
            for index in spec_ms_kind_list_t:
                spec_ms_kind_list.append(system_ms_kind_list[index])
                
            with open(init_deploy_strategy_file_path, "wb") as file:
                storage_data={
                    "ms_kind_list":spec_ms_kind_list,
                    "node_index_list":spec_node_index_list,
                    "resource_cost":self.cloud_system.ms_kind_to_resource_need
                }
                pickle.dump(storage_data, file)
            print("end init strategy!")
            self.execute_init_deploy(spec_ms_kind_list, spec_node_index_list)
            
            return
    def get_ms_index_list(self, system_ms_kind_list):
        ms_kind_to_ave_time_all={}
        ms_kind_to_num={}
        print("start get index")
        for service_name in self.cloud_system.service_to_callgraph_dict.keys():
            request_t=self.cloud_system.service_to_callgraph_dict[service_name]
            for task_t in request_t.task_id_to_obj.values():
                if task_t.ms_kind in ms_kind_to_ave_time_all.keys():
                    ms_kind_to_ave_time_all[task_t.ms_kind]+=max(task_t.ave_deal_time, 0.1)*task_t.call_num
                    ms_kind_to_num[task_t.ms_kind]+=task_t.call_num
                else:
                    ms_kind_to_ave_time_all[task_t.ms_kind]=max(task_t.ave_deal_time, 0.1)*task_t.call_num
                    ms_kind_to_num[task_t.ms_kind]=task_t.call_num
        index_list=[]
        for ms_kind in  system_ms_kind_list:
            index_list.append(ms_kind_to_num[ms_kind])
            # if ms_kind_to_ave_time_all[ms_kind]<3600*1000:
            #     index=(3600*1000-ms_kind_to_ave_time_all[ms_kind])/ms_kind_to_num[ms_kind]
            #     if index<10:
            #         index_list.append(ms_kind_to_num[ms_kind])
            #     else:
            #         index_list.append(0)
            # else:
            #     index_list.append(0)
            
        print("end get_index")
        return index_list
        
        
        
    def init_deploy_with_GA(self):
        """Initial placement via the genetic algorithm, with on-disk caching.

        Loads a cached strategy when one exists for the current node count and
        resource percent; otherwise runs ``GA_algorithm`` over the symmetric
        call-weight matrix and persists the decoded per-node placement.
        """
        init_deploy_strategy_file_path=self.cloud_system.args.history_base_file+f"_{self.cloud_system.args.bare_metal_node_num}_{self.state_resource_percent}.GA_init_deploy"
        if os.path.exists(init_deploy_strategy_file_path):
            with open(init_deploy_strategy_file_path, "rb") as file:
                storage_data=pickle.load(file)
                spec_ms_kind_list=storage_data["ms_kind_list"]
                spec_node_index_list=storage_data["node_index_list"]
                self.cloud_system.ms_kind_to_resource_need=storage_data["resource_cost"]
                self.execute_init_deploy(spec_ms_kind_list, spec_node_index_list)
        else:
            if self.cloud_system.print_level>1:
                print("get init deploy strategy...")
            system_ms_kind_list=self.cloud_system.ms_kind_list
            weight_matrix_t = self.get_weight_matrix(system_ms_kind_list)
            cpu_cost_t = []
            mem_cost_t = []
            disk_cost_t = []
            for ms_kind in system_ms_kind_list:
                cpu_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["cpu"])
                mem_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["mem"])
                disk_cost_t.append(self.cloud_system.ms_kind_to_resource_need[ms_kind]["disk"])
            cpu_cost_t = np.array(cpu_cost_t)
            mem_cost_t = np.array(mem_cost_t)
            disk_cost_t = np.array(disk_cost_t)
            ms_ratio_t = np.sum(weight_matrix_t, axis=1) / np.sum(weight_matrix_t)
            node_num_t = self.cloud_system.args.bare_metal_node_num
            print("start GA...")
            best_pop=GA_algorithm(weight_matrix_t, cpu_cost_t, mem_cost_t, disk_cost_t, ms_ratio_t, node_num_t, self.state_resource_percent)
            
            # Decode the best individual into (ms_kind_list, node_index_list):
            # best_pop[node][kind] holds that kind's instance count on that node.
            spec_ms_kind_list=[]
            spec_node_index_list=[]
            for node_index in range(best_pop.shape[0]):
                for ms_kind_index in range(best_pop.shape[1]):
                    ms_number=best_pop[node_index][ms_kind_index]
                    for _ in range(ms_number):
                        spec_ms_kind_list.append(system_ms_kind_list[ms_kind_index])
                        spec_node_index_list.append(node_index)
                        
            with open(init_deploy_strategy_file_path, "wb") as file:
                storage_data={
                    "ms_kind_list":spec_ms_kind_list,
                    "node_index_list":spec_node_index_list,
                    "resource_cost":self.cloud_system.ms_kind_to_resource_need
                }
                pickle.dump(storage_data, file)
            print("end init strategy!")
            self.execute_init_deploy(spec_ms_kind_list, spec_node_index_list)
            
            return
        
    def get_weight_matrix(self, system_ms_kind_list, direct_flage=False):
        #基于类型，统计邻接矩阵
        ms_kind_adj=np.zeros((len(system_ms_kind_list), len(system_ms_kind_list)))
        for service_name in self.cloud_system.service_to_callgraph_dict.keys():
            request_t=self.cloud_system.service_to_callgraph_dict[service_name]
            for task_t in request_t.start_task_list:
                self.get_weight_matrix_update(task_t, ms_kind_adj, system_ms_kind_list, direct_flage)
        return ms_kind_adj
        
    def get_weight_matrix_update(self,task, ms_kind_adj, system_ms_kind_list, direct_flage):
        for sub_task in task.sub_task_list:
            task_index=system_ms_kind_list.index(task.ms_kind)
            sub_task_index=system_ms_kind_list.index(sub_task.ms_kind)
            call_num=sub_task.call_num
            ms_kind_adj[task_index][sub_task_index]+=call_num
            if direct_flage==False:  #代表是否区分调用方向，false则表示不区分
                ms_kind_adj[sub_task_index][task_index]+=call_num
            self.get_weight_matrix_update(sub_task, ms_kind_adj, system_ms_kind_list, direct_flage)
        


    def get_adj_kind_matrix(self, each_ms_kind_deploy_number):
        #基于类型，统计邻接矩阵
        ms_kind_list=self.cloud_system.ms_kind_list
        ms_kind_adj=np.zeros((len(ms_kind_list), len(ms_kind_list)))
        for service_name in self.cloud_system.service_to_callgraph_dict.keys():
            request_t=self.cloud_system.service_to_callgraph_dict[service_name]
            for task_t in request_t.start_task_list:
                self.get_adj_matrix_update_ms_kind_adj(task_t, ms_kind_adj,ms_kind_list)
        #基于类型统计的邻接矩阵，计算计划生成的微服务call 邻接矩阵
        ms_list=[]
        for ms_kind in ms_kind_list:
            for order in range(each_ms_kind_deploy_number):
                ms_list.append(ms_kind)
                
        ms_adj=np.zeros((len(ms_list), len(ms_list)))
        for i in range(len(ms_list)):
            for j in range(0, i):
                pre_index_i=math.floor(i/each_ms_kind_deploy_number)
                pre_index_j=math.floor(j/each_ms_kind_deploy_number)
                call_num=ms_kind_adj[pre_index_i][pre_index_j]
                ms_adj[i][j]=call_num
                ms_adj[j][i]=call_num
        return ms_adj, ms_list
    
    
    
    def execute_init_deploy(self, ms_kind_list, node_index_list):
        #这里输入的ms_kind_list和cloud system 中的不一样，这里的可以重复，而cloud system中的不可以重复
        self.ms_id_list_exist=[]
        self.ms_kind_list_exist=[]
        self.ms_node_index_exist=[]
        self.ms_kind_to_number={}
        
        #根据社团划分结果进行部署
        for index in range(len(ms_kind_list)):
            ms_kind=ms_kind_list[index]
            node_index=node_index_list[index]
            result, ms_obj= self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind, without_cold_start=True)
            assert result==True
            #常驻微服务，不会被删除
            ms_obj.stable_flage_for_ours=True
            self.ms_id_list_exist.append(ms_obj.ms_id)
            self.ms_kind_list_exist.append(ms_kind)
            self.ms_node_index_exist.append(node_index)
            if ms_kind not in self.ms_kind_to_number.keys():
                self.ms_kind_to_number[ms_kind]=1
            else:
                self.ms_kind_to_number[ms_kind]+=1
            
            self.set_ms_obj_end_time(ms_obj, 0)
        
        self.ms_call_matrix=np.zeros((len(self.ms_id_list_exist), len(self.ms_id_list_exist)))
    
    def set_ms_obj_end_time(self, ms_obj, end_time):
        self.ms_obj_to_end_time[ms_obj]=end_time
        return
    
    def update_ms_obj_end_time(self, ms_obj):
        estimate_end_time=ms_obj.get_estimate_end_time()
        self.ms_obj_to_end_time[ms_obj]=estimate_end_time
        return estimate_end_time
    
    def get_ms_obj_end_time(self, ms_obj):
        return self.ms_obj_to_end_time[ms_obj]
    
    def rm_ms_obj_end_time(self, ms_obj):
        del self.ms_obj_to_end_time[ms_obj]
    #######################################################################################
    # Entry point
    def do_schedule_deploy(self):
        """Our scheduling method: place every pending task onto microservice instances.

        For each pending task this lazily attaches a deep-copied template
        request, decides the replica count, and dispatches the replicas.
        Tasks that fail to schedule remain in ``pending_task_queue`` for the
        next round; finally the idle-removal timer is re-armed.
        """
        not_scheduled_tasks=[]
        for task in self.cloud_system.pending_task_queue:
            if task.request.request_base==None:
                # Fetch the template request object for this service.
                base_request=self.cloud_system.service_to_callgraph_dict[task.request.service_name]
                # Attach a deep copy of the template to this task's request.
                task.request.set_deepcopy_request_base(copy.deepcopy(base_request))
                # Update start_task_list on the copied template.
                task.request.request_base.update_base_start_task_list(task)
                # Assign reliability and deadline values.
                task.request.request_base.reset_for_our_method(task.request.reliability)
            
            spec_request_base=task.request.request_base
            task_base=spec_request_base.task_id_to_obj[task.task_id]
            task.ave_deal_time=task_base.ave_deal_time
            
            replica_num=self.schedule_get_replica_with_cost(spec_request_base, task_base)
            assert replica_num!=None
            task.set_replica_num(replica_num)
            result=self.schedule_ours_deploy_and_select_ms(task, replica_num, task_base)
            # False means this task could not be placed; retry next round.
            if result==False:
                not_scheduled_tasks.append(task)
                if self.print_level>10:
                    print(f"Time:{round(self.env.now)}\t (SCHE FAILED EVENT) \t{task.request.request_id}\t{task.task_id}")
            else:
                task.request.request_base.scheduled_task_list.append(task)
                
        # Keep only the tasks that could not be scheduled.
        self.cloud_system.pending_task_queue=not_scheduled_tasks
        self.reset_ms_remove_event()
        return 
    
    
    # Select an existing microservice for the task, or deploy a new one
    def schedule_ours_deploy_and_select_ms(self, task, replica_num, task_base):
        """Dispatch *replica_num* copies of *task* onto microservice instances.

        For each replica, either reuse the instance with the earliest start
        time or deploy a fresh one when even a cold start would beat the
        existing queues. Returns False only when the very first replica cannot
        be placed anywhere.
        """
        # Current simulation time.
        time_now=self.cloud_system.env.now
        task.ave_deal_time=task_base.ave_deal_time
        # All running instances of this task's microservice kind.
        ms_obj_list=self.cloud_system.get_available_ms_obj_with_ms_kind(task.ms_kind)
        ms_obj_with_early_start_time_list=[]
        for ms_obj in ms_obj_list:
            # Refresh the earliest start time (absolute simulation times).
            ms_idle_time=self.update_ms_obj_end_time(ms_obj)
            TT=self.cloud_system.get_TT_time(task, ms_obj, task.first_parent_ms_obj)
            early_start_time=max(time_now+TT, ms_idle_time)
            ms_obj_with_early_start_time_list.append([early_start_time, ms_obj])
        # Parent microservice, used to pick a placement node for new instances.
        # NOTE(review): this reads task.parent_task.first_parent_ms_obj while the
        # TT computations above use task.first_parent_ms_obj — confirm intended.
        if task.parent_task==None:
            parent_ms=None
        else:
            parent_ms=task.parent_task.first_parent_ms_obj
        ## Dispatch each replica onto a microservice instance.
        for index in range(replica_num):
            if self.judge_need_new_ms(task, task_base, ms_obj_with_early_start_time_list):
                # Deploy a new instance and route the task there.
                ms_obj_new=self.schedule_ours_deploy(parent_ms, task.ms_kind)
                # Handle the case where no node can host a new instance.
                if ms_obj_new==None:
                    if index == 0:   # failed on the very first replica: scheduling fails
                        return False
                    else:
                        # At least one replica was already placed; reuse that instance.
                        assert len(ms_obj_with_early_start_time_list)>0
                        [early_start_time, ms_obj_new]=ms_obj_with_early_start_time_list[0]
                        
                TT_new=self.cloud_system.get_TT_time(task, ms_obj_new, task.first_parent_ms_obj)
                early_start_time_new=max(time_now+TT_new, self.ms_obj_to_end_time[ms_obj_new])
                self.set_ms_obj_end_time(ms_obj_new, early_start_time_new+task_base.ave_deal_time)
                self.cloud_system.env.process(ms_obj_new.put_task_to_queue_event(task))
                # This records the earliest start time for the NEXT task on the
                # instance (the transfer time is already folded in above).
                ms_obj_with_early_start_time_list.append([early_start_time_new+task_base.ave_deal_time, ms_obj_new])
                task.aim_ms_obj_list.append(ms_obj_new)
                continue
                
            ms_obj_with_early_start_time_list.sort(key=lambda x:x[0])
            [early_start_time, aim_ms]=ms_obj_with_early_start_time_list[0]
            plan_end_time=early_start_time+task_base.ave_deal_time
            self.set_ms_obj_end_time(aim_ms, plan_end_time)
            self.cloud_system.env.process(aim_ms.put_task_to_queue_event(task)) 
            ms_obj_with_early_start_time_list[0][0]+=task_base.ave_deal_time
            task.aim_ms_obj_list.append(aim_ms)
        return True
    
    def judge_need_new_ms(self, task, task_base, ms_obj_with_early_start_time_list):
        #需要新建 True， 不需要为False
        if len(ms_obj_with_early_start_time_list)==0:
            return True
        ms_obj_with_early_start_time_list.sort(key=lambda x:x[0])
        [early_start_time, aim_ms]=ms_obj_with_early_start_time_list[0]
        #如果新建的最早开始时间小于当前的最早开始时间，则重新创建一个(重新创建一个有冷启动时间)
        if early_start_time>self.env.now+self.cloud_system.get_cold_start_time(aim_ms.ms_kind):
            return True
        else:
            return False
        
    # Deploy a new microservice instance
    def schedule_ours_deploy(self, parent_ms, ms_kind):
        #优先在父微服务部署
        if parent_ms!=None:
            flage, ms_obj =parent_ms.node_obj.deploy_ms(ms_kind)
            if flage==True:
                self.set_ms_obj_end_time(ms_obj, self.env.now+self.cloud_system.get_cold_start_time(ms_kind))
                return ms_obj
        node_num=self.cloud_system.args.bare_metal_node_num
        node_index_list=random.sample(range(node_num), node_num)
        for node_index in node_index_list:
            node=self.cloud_system.bare_metal_node_list[node_index]
            # msid_list=node.msid_to_msobj.keys()
            # for ms_id in msid_list:
                # if node.msid_to_msobj[ms_id].ms_kind!=ms_kind and node.msid_to_msobj[ms_id].can_be_rm():
                #     node.remove_ms(ms_id)
            flage, ms_obj =node.deploy_ms(ms_kind)
            if flage==False:
                continue
            # assert flage==True
            #初始化的时候设置为冷启动结束时间
            self.set_ms_obj_end_time(ms_obj, self.env.now+self.cloud_system.get_cold_start_time(ms_kind))
            return ms_obj
        
        return None
        
    
    
        
        
        
    def reset_ms_remove_event(self):
        try:
            self.remove_process.interrupt()
        except:
            pass
            
        if len(self.ms_obj_to_end_time.keys())>0:
            sorted_keys=sorted(self.ms_obj_to_end_time, key=lambda k:self.ms_obj_to_end_time[k])
            #寻找第一个非常驻微服务
            self.next_remove_ms=None
            for index in range(len(sorted_keys)):
                if sorted_keys[index].stable_flage_for_ours==False:
                    self.next_remove_ms=sorted_keys[index]
                    break
            if self.next_remove_ms==None:
                return
            else:
                interval=max(0, self.ms_obj_to_end_time[self.next_remove_ms]-self.cloud_system.env.now+self.delay_remove_time)
                self.remove_process=self.cloud_system.env.process(self.remove_latest_end_ms_obj(interval))
        
    def remove_latest_end_ms_obj(self, interval):
        """Simulation process: after *interval*, remove the chosen idle instance and re-arm.

        NOTE(review): the bare ``except`` presumably absorbs the simulation
        framework's interrupt raised by ``reset_ms_remove_event`` — confirm no
        other errors should be silenced here.
        """
        try:
            yield self.cloud_system.env.timeout(interval)
            if self.next_remove_ms!=None:
                # Remove the instance previously selected for removal.
                if self.next_remove_ms.can_be_rm():
                    self.next_remove_ms.node_obj.remove_ms(self.next_remove_ms.ms_id)
                    self.rm_ms_obj_end_time(self.next_remove_ms)
                else:
                    # Still busy: push its end time back and try again later.
                    self.ms_obj_to_end_time[self.next_remove_ms]+=100
            
            if len(self.ms_obj_to_end_time.keys())>0:
                sorted_keys=sorted(self.ms_obj_to_end_time, key=lambda k:self.ms_obj_to_end_time[k])
                # Find the first non-resident microservice.
                self.next_remove_ms=None
                for index in range(len(sorted_keys)):
                    if sorted_keys[index].stable_flage_for_ours==False:
                        self.next_remove_ms=sorted_keys[index]
                        break
                if self.next_remove_ms==None:
                    return
                else:
                    interval=max(0, self.ms_obj_to_end_time[self.next_remove_ms]-self.cloud_system.env.now+self.delay_remove_time)
                    self.remove_process=self.cloud_system.env.process(self.remove_latest_end_ms_obj(interval))
            else:
                self.remove_process=None
                
        except:
            pass
        return
    
            
    
    
    
    
    
        
    def judge_resource_enough(self, node, ms_kind):
        cpu_need=self.cloud_system.ms_kind_to_resource_need[ms_kind]["cpu"]
        mem_need=self.cloud_system.ms_kind_to_resource_need[ms_kind]["mem"]
        disk_need=self.cloud_system.ms_kind_to_resource_need[ms_kind]["disk"]
        if node.cpu_rest>=cpu_need and node.mem_rest>=mem_need and node.disk_rest>=disk_need:
            return True
        else:
            return False
        
    def get_next_remove_idle_ms_for_new(self, node, ms_kind_aim):
        """Pick removable idle instances on *node* to free room for one *ms_kind_aim*.

        Returns ``(ms_obj_list, score)``: the list (most-idle first) frees
        enough cpu/mem/disk, and the score is the list length minus the
        normalised surplus; ``([], None)`` when freeing enough is impossible.
        """
        idle_percent_list=[]
        can_removed_ms_obj_list=[]
        for ms_obj in node.msid_to_msobj.values():
            ms_kind=ms_obj.ms_kind
            # Never remove the last instance of a kind.
            if self.ms_kind_to_number[ms_kind]==1:
                continue
            
            persist_time=self.cloud_system.env.now-ms_obj.start_time
            if ms_obj.can_be_rm():
                idle_percent=ms_obj.get_idle_percent()
                idle_percent_list.append([idle_percent, ms_obj, persist_time])
        if len(idle_percent_list)==0:
            return [], None
        
        # Most idle instances first.
        idle_percent_list.sort(key=lambda x:x[0], reverse=True)

        # Deficit beyond what the node already has free.
        cpu_need=self.cloud_system.ms_kind_to_resource_need[ms_kind_aim]["cpu"]-node.cpu_rest
        mem_need=self.cloud_system.ms_kind_to_resource_need[ms_kind_aim]["mem"]-node.mem_rest
        disk_need=self.cloud_system.ms_kind_to_resource_need[ms_kind_aim]["disk"]-node.disk_rest
        
        for [idle_percent, ms_obj, persist_time] in idle_percent_list:
            can_removed_ms_obj_list.append(ms_obj)
            cpu_need-=ms_obj.cpu_need
            mem_need-=ms_obj.mem_need
            disk_need-=ms_obj.disk_need
            if cpu_need<=0 and mem_need<=0 and disk_need<=0:
                return can_removed_ms_obj_list, len(can_removed_ms_obj_list)-(cpu_need+mem_need+disk_need)/300  #normalized
        return [], None
        
    
    # def remove_idle_deploy_spec_ms(self, ms_kind_new):
    #     # aim_node_obj, ms_obj_rm= self.evaluate_remove_idle_ms(ms_kind_new)
    #     if self.aim_ms_obj_list!=None:
    #         #移除旧的
    #         for ms_obj in self.aim_ms_obj_list:
    #             self.aim_node_obj.remove_ms(ms_obj.ms_id)
    #             self.rm_ms_update_record(ms_obj.ms_id, ms_obj.ms_kind)
    #     #重新部署
    #     _, ms_obj=self.aim_node_obj.deploy_ms(ms_kind_new)
    #     # if ms_obj==None:
    #     #     a=1
    #     node_index=self.cloud_system.bare_metal_node_list.index(self.aim_node_obj)
    #     self.add_ms_update_record(ms_obj.ms_id,ms_kind_new,node_index)
    #     early_start_time=self.cloud_system.env.now+self.cloud_system.get_cold_start_time(ms_kind_new)
    #     return [early_start_time, ms_obj]
    
    # Compute the average transmission time (TT)
    def schedule_get_average_TT(self, task, ms_obj):
        TT=0
        if task.parent_task!=None:
            #处理父task有多个副本的情况，传输时间取平均值
            for parent_ms_obj in task.parent_task.aim_ms_obj_list:
                TT+=self.cloud_system.get_TT_time( task, ms_obj, parent_ms_obj)
            TT=TT/len(task.parent_task.aim_ms_obj_list)
        return TT
    
    
        
    def schedule_get_replica_with_cost(self, request_base, task_base):
        """Decide the replica count for *task_base* to meet the request reliability.

        Walks the request's not-yet-decided tasks in a heuristic order
        (selected by ``args.heuristic_method``), apportioning the residual
        reliability budget across them, and returns the replica count once
        *task_base* is reached (also recording it on the request).

        NOTE(review): if *task_base* is not among the remaining tasks, the
        loop falls through and implicitly returns None — the caller asserts
        against that, so confirm it cannot occur in practice.
        """
        if task_base.replica_num!=None:
            return task_base.replica_num
        rest_task_list=[]
        for task_t in request_base.task_id_to_obj.values():
            if task_t.task_id in request_base.replica_done_task_id_list:
                continue
            
            if "relia" in self.cloud_system.args.heuristic_method:
                if task_t.task_id in request_base.task_id_to_single_reliability.keys():
                    spec_value=request_base.task_id_to_single_reliability[task_t.task_id]
                else:
                    # Failure-rate parameter of this microservice kind.
                    ms_lambda=self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
                    # Probability that a single task instance runs without failure.
                    single_reliability=get_reliability_without_failure(task_t.ave_deal_time, ms_lambda)
                    spec_value=single_reliability
                    request_base.task_id_to_single_reliability[task_t.task_id]=single_reliability
                    
            elif "deal_time" in self.cloud_system.args.heuristic_method:
                spec_value=task_t.ave_deal_time
                
            elif "failure_rate" in self.cloud_system.args.heuristic_method:
                spec_value=self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
            else:
                print(f"heuristic_method wrong :{self.cloud_system.args.heuristic_method}")
                exit(-1)
            rest_task_list.append([spec_value, task_t])
        if "down" in self.cloud_system.args.heuristic_method:
            rest_task_list.sort(key=lambda x:x[0], reverse=True)
        elif "up" in self.cloud_system.args.heuristic_method:
            rest_task_list.sort(key=lambda x:x[0])
        else:
            print(f"heuristic_method wrong :{self.cloud_system.args.heuristic_method}")
            exit(-1)
        
        all_task_num_t=len(rest_task_list)
        already_num_t=0
        # Residual reliability still to be achieved by the remaining tasks.
        rest_reliability=request_base.reliability/request_base.already_reliability
        already_reliability_t=1
        for [spec_value, task_t] in rest_task_list:
            if task_t.task_id in request_base.task_id_to_single_reliability.keys():
                single_reliability=request_base.task_id_to_single_reliability[task_t.task_id]
            else:
                # Failure-rate parameter of this microservice kind.
                ms_lambda=self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
                # Probability that a single task instance runs without failure.
                single_reliability=get_reliability_without_failure(task_t.ave_deal_time, ms_lambda)
                request_base.task_id_to_single_reliability[task_t.task_id]=single_reliability
            # Reliability this task must contribute, spread evenly over the rest.
            this_req=math.pow(rest_reliability/already_reliability_t, 1/(all_task_num_t-already_num_t))
            
            # Replica count needed to reach this_req.
            replica_num=get_aim_replica_num(this_req, single_reliability)
            if task_t==task_base:
                task_base.replica_num=replica_num
                request_base.already_reliability=request_base.already_reliability*(1-math.pow(1-single_reliability, replica_num))
                request_base.replica_done_task_id_list.append(task_t.task_id)
                return replica_num
            else:
                already_reliability_t=already_reliability_t*(1-math.pow(1-single_reliability, replica_num))
                already_num_t+=1
    
    
    def get_idle_ms_num(self, ms_kind):
        """Return how many available microservice instances of *ms_kind* are idle.

        An instance counts as idle when its request queue is empty
        (``get_queue_len() == 0``).
        """
        available = self.cloud_system.get_available_ms_obj_with_ms_kind(ms_kind)
        return sum(1 for ms in available if ms.get_queue_len() == 0)
    
    def schedule_deploy_one_ms(self, ms_kind, aim_node):
        """Deploy one microservice instance of *ms_kind*, preferring *aim_node*.

        Tries *aim_node* first (when it is not None). If that deployment
        fails — or no preferred node was given — every bare-metal node is
        probed in a random order until one accepts the deployment. The new
        instance's end time is set to the cold-start completion time so it
        is not scheduled before the cold start finishes.

        Args:
            ms_kind: kind identifier of the microservice to deploy.
            aim_node: preferred node object, or None to go straight to the
                random fallback.

        Returns:
            The deployed microservice object.

        Exits the process with status -1 when no node has capacity
        (matches the file's existing error-handling style).
        """
        # Fixed: `aim_node != None` -> `is not None`, `flage == True` -> truthiness,
        # and the duplicated post-deploy bookkeeping is now a single code path.
        if aim_node is not None:
            deployed, ms_id, _node_id = aim_node.deploy_ms(ms_kind)
            if deployed:
                ms_obj = aim_node.msid_to_msobj[ms_id]
                # Cold start: mark the instance busy until the cold start ends.
                self.set_ms_obj_end_time(ms_obj, self.cloud_system.get_cold_start_time(ms_kind))
                return ms_obj

        # Fallback: probe all bare-metal nodes in a random permutation.
        node_num = self.cloud_system.args.bare_metal_node_num
        for node_index in random.sample(range(node_num), node_num):
            node = self.cloud_system.bare_metal_node_list[node_index]
            deployed, ms_id, _ = node.deploy_ms(ms_kind)
            if deployed:
                ms_obj = node.msid_to_msobj[ms_id]
                self.set_ms_obj_end_time(ms_obj, self.cloud_system.get_cold_start_time(ms_kind))
                return ms_obj

        print(f"schedule_random_deploy error (capacity lack)!")
        exit(-1)
    
        
    # def add_ms_update_record(self, ms_id, ms_kind, ms_cluster_index):
    #     #列表中添加id
    #     self.ms_id_list_exist.append(ms_id)
    #     #添加ms kind信息
    #     self.ms_kind_list_exist.append(ms_kind)
    #     #添加社团索引信息
    #     self.ms_node_index_exist.append(ms_cluster_index)
    #     #更新ms kind 对应的num
    #     self.ms_kind_to_number[ms_kind]+=1
    #     #扩展矩阵
    #     new_row=np.zeros((1,self.ms_call_matrix.shape[1]))
    #     self.ms_call_matrix=np.vstack((self.ms_call_matrix, new_row))
    #     new_col=np.zeros((self.ms_call_matrix.shape[0],1))
    #     self.ms_call_matrix=np.hstack((self.ms_call_matrix, new_col))
        
    # def rm_ms_update_record(self, ms_id, ms_kind):
    #     #列表中删除id
    #     ms_id_index=self.ms_id_list_exist.index(ms_id)
    #     self.ms_id_list_exist.pop(ms_id_index)
    #     #删除ms kind信息
    #     self.ms_kind_list_exist.pop(ms_id_index)
    #     #删除社团索引信息
    #     self.ms_node_index_exist.pop(ms_id_index)
    #     #更新ms kind 对应的num
    #     self.ms_kind_to_number[ms_kind]-=1
    #     assert self.ms_kind_to_number[ms_kind]>0
    #     #缩减矩阵
    #     self.ms_call_matrix=np.delete(self.ms_call_matrix,ms_id_index,0)
    #     self.ms_call_matrix=np.delete(self.ms_call_matrix,ms_id_index,1)
        
        
#***************************************************************************************************************************************  