import numpy as np
from util import *
import copy
import pickle
import pymetis
from pymetis import Options 
import scipy.sparse as sp
from itertools import cycle
import random
# import hashlib
# import torch
import json
import os
import math
from Method.GA_for_ours_deploy import GA_algorithm
from Method.Heuristic_init_deploy import Heuristic_algorithm
from collections import Counter
# At the algorithm level, the dealing time and the number of sub-tasks of a task about to be scheduled are unknown.


class OurMethod:
    def __init__(self, cloud_system):
        """Scheduler/deployer for "our" method, bound to a cloud system.

        Tracks per-microservice estimated finish times and the handle of the
        pending microservice-removal simulation process.
        """
        self.cloud_system = cloud_system
        self.env = cloud_system.env
        self.print_level = cloud_system.print_level
        # Maps each microservice object to the time it is expected to finish work.
        self.ms_obj_to_end_time = {}
        # Handle of the currently armed removal process, if any.
        self.remove_process = None
        
    def init_deploy(self):
        """Perform the initial deployment (delegates to the one-replica-per-kind strategy)."""
        self.init_deploy_with_each_once()
    
    def init_deploy_with_each_once(self):
        """Initial deployment with exactly one replica per microservice kind.

        The community-division result is cached on disk: if a cache file for
        this service file / node count exists it is loaded and replayed,
        otherwise the partition is computed via METIS and persisted.
        """
        cache_path = self.cloud_system.args.aim_service_file + f"_{self.cloud_system.args.bare_metal_node_num}.init_each_once"
        if os.path.exists(cache_path):
            # Replay the previously computed community division.
            with open(cache_path, "rb") as file:
                cached = pickle.load(file)
                self.init_deploy_according_to_community_division(
                    cached["ms_kind_list"], cached["node_index_list"])
        else:
            # One planned instance per kind, partitioned over the bare-metal nodes.
            adj_matrix, ms_kind_list = self.get_adj_matrix(1)
            node_index_list, cut = self.k_way_cut(adj_matrix, self.cloud_system.args.bare_metal_node_num)
            self.init_deploy_according_to_community_division(ms_kind_list, node_index_list)
            with open(cache_path, "wb") as file:
                pickle.dump({
                    "ms_kind_list": ms_kind_list,
                    "node_index_list": node_index_list,
                }, file)
        
        
    def get_adj_matrix(self, each_ms_kind_deploy_number):
        """Build the call-count adjacency matrix between planned instances.

        First accumulates call counts between microservice *kinds* from every
        service call graph, then expands that matrix so each kind appears
        `each_ms_kind_deploy_number` times (one row/column per planned
        instance).

        Returns (ms_adj, ms_list): the symmetric instance-level adjacency
        matrix (zero diagonal) and the kind list with one entry per planned
        instance.
        """
        ms_kind_list = self.cloud_system.ms_kind_list
        kind_count = len(ms_kind_list)
        ms_kind_adj = np.zeros((kind_count, kind_count))
        # Accumulate kind-to-kind call counts over every service call graph.
        for request_t in self.cloud_system.service_to_callgraph_dict.values():
            for task_t in request_t.start_task_list:
                self.get_adj_matrix_update_ms_kind_adj(task_t, ms_kind_adj, ms_kind_list)

        # One planned instance per kind per replica slot.
        ms_list = [kind for kind in ms_kind_list for _ in range(each_ms_kind_deploy_number)]

        # Expand the kind-level matrix to the instance level.
        total = len(ms_list)
        ms_adj = np.zeros((total, total))
        for i in range(total):
            kind_i = i // each_ms_kind_deploy_number
            for j in range(i):
                kind_j = j // each_ms_kind_deploy_number
                call_num = ms_kind_adj[kind_i][kind_j]
                ms_adj[i][j] = call_num
                ms_adj[j][i] = call_num
        return ms_adj, ms_list
                
    def get_adj_matrix_update_ms_kind_adj(self, task, ms_kind_adj, ms_kind_list):
        """Recursively accumulate call counts between microservice kinds.

        For every (task, sub_task) edge in the call graph rooted at `task`,
        adds `task.call_num` to both symmetric entries of `ms_kind_adj`
        (rows/columns indexed by the position of each kind in `ms_kind_list`),
        then recurses into the sub-task.
        """
        if not task.sub_task_list:
            return
        # Hoist the parent's index and weight out of the loop: list.index is
        # O(n) and both values are loop-invariant (the original recomputed
        # them for every sub-task).
        task_index = ms_kind_list.index(task.ms_kind)
        call_num = task.call_num
        for sub_task in task.sub_task_list:
            sub_task_index = ms_kind_list.index(sub_task.ms_kind)
            ms_kind_adj[task_index][sub_task_index] += call_num
            ms_kind_adj[sub_task_index][task_index] += call_num
            self.get_adj_matrix_update_ms_kind_adj(sub_task, ms_kind_adj, ms_kind_list)
    
    def k_way_cut(self,adj_matrix, partition_num, imbalance=1.2):
        """Partition the weighted adjacency matrix into `partition_num` parts with METIS.

        METIS balance model: ideal part size = total vertex count / k, and the
        allowed upper bound per part = ideal * (1 + ufactor / 1000).

        Returns (parts, objval): the per-vertex partition index array and the
        edge-cut objective value reported by METIS.
        """
        adj=sp.csr_matrix(adj_matrix)
        # (1) Convert to CSR and symmetrize (METIS requires an undirected graph).
        csr = adj.tocsr()
        if (csr - csr.T).nnz:
            csr = csr.maximum(csr.T)

        xadj     = csr.indptr.astype(np.int32)
        adjncy   = csr.indices.astype(np.int32)
        eweights = csr.data.astype(np.int32)   # NOTE(review): non-integer weights are truncated here — confirm intended
        # print(f"xadj:{xadj}\nadjncy:{adjncy}\neweights:{eweights}")
        # (2) Load imbalance: ufactor = (imbalance - 1) * 1000
        ufactor  = int(round((imbalance - 1) * 1000))   # 1.05 → 50

        # (3) Build Options — pymetis accepts only the Options class, not a dict.
        opts = Options(ufactor=ufactor, numbering=0)    # numbering=0 means C-style 0…n-1 vertex ids

        # (4) Call part_graph (without ubvec / adjwgt arguments).
        objval, parts = pymetis.part_graph(
            partition_num,
            xadj=xadj,
            adjncy=adjncy,
            eweights=eweights,
            options=opts,
        )
        return np.asarray(parts), objval
    
    def init_deploy_according_to_community_division(self, ms_kind_list, node_index_list):
        """Deploy one microservice per (kind, node) pair from a community division.

        Unlike `cloud_system.ms_kind_list`, the `ms_kind_list` given here may
        contain duplicates (one entry per planned instance); `node_index_list`
        gives the target node for each entry. Every deployed microservice is
        marked as a stable resident so the removal process skips it.

        Exits the process when any node cannot host its assigned microservice.
        """
        for ms_kind, node_index in zip(ms_kind_list, node_index_list):
            result, ms_obj = self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind)
            if not result:
                # Original message was a backslash-continued literal that
                # embedded stray indentation; emit it as one clean sentence.
                print("The initial number of microservices was too large, resulting in the "
                      "inability of a single node to accommodate the number of microservices "
                      "after community division.")
                exit(-1)
            # Permanent resident: never chosen by the idle-removal timer.
            ms_obj.stable_flage_for_ours = True
            
    
    def set_ms_obj_end_time(self, ms_obj, end_time):
        """Record `end_time` as the estimated finish time of `ms_obj`."""
        self.ms_obj_to_end_time[ms_obj] = end_time
    
    def update_ms_obj_end_time(self, ms_obj):
        """Refresh the recorded finish time of `ms_obj` from its own estimate.

        Returns the (absolute) estimated end time just stored.
        """
        estimate = ms_obj.get_estimate_end_time()
        self.ms_obj_to_end_time[ms_obj] = estimate
        return estimate
    
    def rm_ms_obj_end_time(self, ms_obj):
        """Forget the recorded finish time of `ms_obj` (KeyError if absent)."""
        self.ms_obj_to_end_time.pop(ms_obj)
    #######################################################################################
    #Entry point
    def do_schedule_deploy(self):
        """Entry point of our scheduling method.

        Tries to schedule every task in the pending queue; tasks that cannot
        be placed stay in the queue for the next round. For each task this
        (1) lazily attaches a deep-copied template request ("request_base"),
        (2) decides the replica count heuristically, and (3) places the
        replicas onto existing or newly deployed microservices. Finally the
        microservice-removal timer is re-armed.
        """
        #Our scheduling method.
        not_scheduled_tasks=[]
        for task in self.cloud_system.pending_task_queue:
            if task.request.request_base==None:
                #Fetch the template request object for this service.
                base_request=self.cloud_system.service_to_callgraph_dict[task.request.service_name]
                #Give the task's request its own deep copy of the template.
                task.request.set_deepcopy_request_base(copy.deepcopy(base_request))
                #Update start_task_list.
                task.request.request_base.update_base_start_task_list(task)
                #Assign reliability and deadline.
                task.request.request_base.reset_for_our_method(task.request.reliability)
                # for task_t in task.request.request_base.task_id_to_obj.values():
                #     task.request.request_base.
            
            spec_request_base=task.request.request_base
            task_base=spec_request_base.task_id_to_obj[task.task_id]
            task.ave_deal_time=task_base.ave_deal_time
            
            replica_num=self.schedule_get_replica_ours(spec_request_base, task_base)
            assert replica_num!=None
            task.set_replica_num(replica_num)
            result=self.schedule_ours_deploy_and_select_ms(task, replica_num, task_base)
            #A False result means scheduling this task failed.
            if result==False:
                not_scheduled_tasks.append(task)
                if self.print_level>10:
                    print(f"Time:{round(self.env.now)}\t (SCHE FAILED EVENT) \t{task.request.request_id}\t{task.task_id}")
            else:
                task.request.request_base.scheduled_task_list.append(task)
                
        #Replace the queue with only the tasks that failed to schedule.
        self.cloud_system.pending_task_queue=not_scheduled_tasks
        self.reset_ms_remove_event()
        return
    
    
    #Select existing microservices for the task, or deploy new ones.
    def schedule_ours_deploy_and_select_ms(self, task, replica_num, task_base):
        """Place `replica_num` replicas of `task` on microservices.

        For each replica, either reuses the running microservice with the
        earliest feasible start time, or — when even a cold start would be
        faster — deploys a new instance near the parent microservice.

        Returns False only when the very first replica cannot be deployed
        anywhere; True otherwise.
        """
        #Current simulation time.
        time_now=self.cloud_system.env.now
        task.ave_deal_time=task_base.ave_deal_time
        #All currently running microservices of this kind.
        ms_obj_list=self.cloud_system.get_available_ms_obj_with_ms_kind(task.ms_kind)
        ms_obj_with_early_start_time_list=[]
        for ms_obj in ms_obj_list:
            #Refresh the earliest start time (all values are absolute times).
            ms_idle_time=self.update_ms_obj_end_time(ms_obj)
            TT=self.cloud_system.get_TT_time(task, ms_obj, task.first_parent_ms_obj)
            early_start_time=max(time_now+TT, ms_idle_time)
            ms_obj_with_early_start_time_list.append([early_start_time, ms_obj])
        #Parent microservice, used to estimate data-transfer time for new deployments.
        if task.parent_task==None:
            parent_ms=None
        else:
            parent_ms=task.parent_task.first_parent_ms_obj
        ##Schedule each replica onto a microservice.
        for index in range(replica_num):
            if self.judge_need_new_ms(task, task_base, ms_obj_with_early_start_time_list):
                #Deploy a new microservice and schedule the task onto it.
                ms_obj_new=self.schedule_ours_deploy(parent_ms, task.ms_kind)
                #Handle the case where no node can host a new instance.
                if ms_obj_new==None:
                    if index == 0:   #Failed on the very first replica: scheduling fails outright.
                        return False
                    else:
                        #At least one replica was already placed, so reuse the same MS.
                        assert len(ms_obj_with_early_start_time_list)>0
                        [early_start_time, ms_obj_new]=ms_obj_with_early_start_time_list[0]
                        
                TT_new=self.cloud_system.get_TT_time(task, ms_obj_new, task.first_parent_ms_obj)
                early_start_time_new=max(time_now+TT_new, self.ms_obj_to_end_time[ms_obj_new])
                self.set_ms_obj_end_time(ms_obj_new, early_start_time_new+task_base.ave_deal_time)
                self.cloud_system.env.process(ms_obj_new.put_task_to_queue_event(task))
                #NOTE(review): per the original author's note, the value stored
                #here is the *next* task's earliest start time and arguably
                #should also include a TT transfer time — confirm intended.
                ms_obj_with_early_start_time_list.append([early_start_time_new+task_base.ave_deal_time, ms_obj_new])
                task.aim_ms_obj_list.append(ms_obj_new)
                continue
                
            ms_obj_with_early_start_time_list.sort(key=lambda x:x[0])
            [early_start_time, aim_ms]=ms_obj_with_early_start_time_list[0]
            plan_end_time=early_start_time+task_base.ave_deal_time
            self.set_ms_obj_end_time(aim_ms, plan_end_time)
            self.cloud_system.env.process(aim_ms.put_task_to_queue_event(task)) 
            ms_obj_with_early_start_time_list[0][0]+=task_base.ave_deal_time
            task.aim_ms_obj_list.append(aim_ms)
        return True
    
    def judge_need_new_ms(self, task, task_base, ms_obj_with_early_start_time_list):
        """Decide whether a fresh microservice instance should be deployed.

        Returns True when there is no candidate at all, or when the earliest
        candidate would start later than a newly deployed instance could
        (i.e. later than now + cold-start time of its kind). As a side effect
        the candidate list is sorted in place by earliest start time.
        """
        if not ms_obj_with_early_start_time_list:
            return True
        # Keep the in-place sort: callers rely on the ordered list afterwards.
        ms_obj_with_early_start_time_list.sort(key=lambda entry: entry[0])
        earliest_start, candidate_ms = ms_obj_with_early_start_time_list[0]
        cold_start_done = self.env.now + self.cloud_system.get_cold_start_time(candidate_ms.ms_kind)
        return earliest_start > cold_start_done
        
    #Deploy a new microservice instance.
    def schedule_ours_deploy(self, parent_ms, ms_kind):
        """Deploy a new microservice of `ms_kind` and register its cold-start
        end time.

        Placement policy: try the parent's node first (keeps data transfer
        local), then probe the bare-metal nodes in random order.

        Returns the new microservice object, or None when no node has room.
        """
        # Prefer co-locating with the parent microservice.
        if parent_ms is not None:
            ok, ms_obj = parent_ms.node_obj.deploy_ms(ms_kind)
            if ok:
                self._register_cold_start(ms_obj, ms_kind)
                return ms_obj
        # Otherwise probe every node once, in random order.
        node_num = self.cloud_system.args.bare_metal_node_num
        for node_index in random.sample(range(node_num), node_num):
            ok, ms_obj = self.cloud_system.bare_metal_node_list[node_index].deploy_ms(ms_kind)
            if ok:
                self._register_cold_start(ms_obj, ms_kind)
                return ms_obj
        return None

    def _register_cold_start(self, ms_obj, ms_kind):
        """Record that `ms_obj` only becomes available after its cold start."""
        self.set_ms_obj_end_time(ms_obj, self.env.now + self.cloud_system.get_cold_start_time(ms_kind))
        
    
        
    def reset_ms_remove_event(self):
        """(Re)arm the timer that removes the next idle, non-resident microservice.

        Cancels any in-flight removal process, then schedules a new one for
        the non-stable microservice with the earliest recorded end time (if
        one exists).
        """
        # Original used a bare `try/except: pass`, which also hid the
        # AttributeError raised when remove_process is still None; make the
        # None case explicit and narrow the catch.
        if self.remove_process is not None:
            try:
                self.remove_process.interrupt()
            except Exception:
                # The process may already have finished; interrupting it then
                # raises, which is harmless here.
                pass

        if self.ms_obj_to_end_time:
            sorted_keys = sorted(self.ms_obj_to_end_time, key=lambda k: self.ms_obj_to_end_time[k])
            # Find the first non-resident microservice.
            self.next_remove_ms = None
            for ms_obj in sorted_keys:
                if not ms_obj.stable_flage_for_ours:
                    self.next_remove_ms = ms_obj
                    break
            if self.next_remove_ms is None:
                return
            interval = max(0, self.ms_obj_to_end_time[self.next_remove_ms] - self.cloud_system.env.now)
            self.remove_process = self.cloud_system.env.process(self.remove_latest_end_ms_obj(interval))
        
    def remove_latest_end_ms_obj(self, interval):
        """Simulation process: after `interval`, remove the microservice
        previously selected for removal, then arm the next removal.

        `reset_ms_remove_event` may interrupt this process to re-arm the
        timer; that interrupt is treated as a normal cancellation.
        """
        try:
            yield self.cloud_system.env.timeout(interval)
            if self.next_remove_ms is not None:
                # Remove the chosen microservice if it is currently removable;
                # otherwise push its deadline back and retry later.
                if self.next_remove_ms.can_be_rm():
                    self.next_remove_ms.node_obj.remove_ms(self.next_remove_ms.ms_id)
                    self.rm_ms_obj_end_time(self.next_remove_ms)
                else:
                    # 100 sim-time units is a heuristic backoff before retrying.
                    self.ms_obj_to_end_time[self.next_remove_ms] += 100

            if self.ms_obj_to_end_time:
                sorted_keys = sorted(self.ms_obj_to_end_time, key=lambda k: self.ms_obj_to_end_time[k])
                # Find the first non-resident microservice.
                self.next_remove_ms = None
                for ms_obj in sorted_keys:
                    if not ms_obj.stable_flage_for_ours:
                        self.next_remove_ms = ms_obj
                        break
                if self.next_remove_ms is None:
                    return
                interval = max(0, self.ms_obj_to_end_time[self.next_remove_ms] - self.cloud_system.env.now)
                self.remove_process = self.cloud_system.env.process(self.remove_latest_end_ms_obj(interval))
            else:
                self.remove_process = None
        except Exception:
            # NOTE(review): narrowed from a bare `except:` (which also
            # swallowed KeyboardInterrupt/SystemExit). This is expected to
            # catch simpy's Interrupt when the timer is re-armed; other
            # errors are dropped as in the original best-effort design.
            pass
        return
    
       
        
    def schedule_get_replica_ours(self, request_base, task_base):
        """Heuristically decide the replica count for `task_base`.

        The not-yet-decided tasks of the request are ordered by the configured
        heuristic key, then the request's remaining reliability target is
        split geometrically among them; each task's replica count is the
        smallest one meeting its share. When `task_base` is reached, the
        request's accumulated reliability and done-list are updated.

        Returns the replica count, or None when `task_base` is not among the
        remaining tasks (callers assert this does not happen).
        """
        # Already decided earlier — reuse it.
        if task_base.replica_num is not None:
            return task_base.replica_num

        def single_reliability_of(task_t):
            # Per-task no-failure probability, cached on the request. The
            # original duplicated this computation in two places.
            cache = request_base.task_id_to_single_reliability
            if task_t.task_id in cache:
                return cache[task_t.task_id]
            ms_lambda = self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
            reliability = get_reliability_without_failure(task_t.ave_deal_time, ms_lambda)
            cache[task_t.task_id] = reliability
            return reliability

        heuristic = self.cloud_system.args.heuristic_method
        rest_task_list = []
        for task_t in request_base.task_id_to_obj.values():
            if task_t.task_id in request_base.replica_done_task_id_list:
                continue
            # Sort key according to the configured heuristic.
            if "relia" in heuristic:
                spec_value = single_reliability_of(task_t)
            elif "deal_time" in heuristic:
                spec_value = task_t.ave_deal_time
            elif "failure_rate" in heuristic:
                spec_value = self.cloud_system.ms_kind_to_transient_lambda[task_t.ms_kind]
            else:
                print(f"heuristic_method wrong :{heuristic}")
                exit(-1)
            rest_task_list.append([spec_value, task_t])

        if "down" in heuristic:
            rest_task_list.sort(key=lambda x: x[0], reverse=True)
        elif "up" in heuristic:
            rest_task_list.sort(key=lambda x: x[0])
        else:
            print(f"heuristic_method wrong :{heuristic}")
            exit(-1)

        all_task_num_t = len(rest_task_list)
        already_num_t = 0
        # Reliability still to be achieved by the remaining tasks.
        rest_reliability = request_base.reliability / request_base.already_reliability
        already_reliability_t = 1
        for [spec_value, task_t] in rest_task_list:
            single_reliability = single_reliability_of(task_t)
            # Reliability share this task must reach: geometric split over the
            # tasks that have not been decided yet.
            this_req = math.pow(rest_reliability / already_reliability_t, 1 / (all_task_num_t - already_num_t))
            # Smallest replica count meeting this share.
            replica_num = get_aim_replica_num(this_req, single_reliability)
            if task_t == task_base:
                task_base.replica_num = replica_num
                request_base.already_reliability = request_base.already_reliability * (1 - math.pow(1 - single_reliability, replica_num))
                request_base.replica_done_task_id_list.append(task_t.task_id)
                return replica_num
            already_reliability_t = already_reliability_t * (1 - math.pow(1 - single_reliability, replica_num))
            already_num_t += 1
        # task_base was not among the remaining tasks: fall through to None
        # (the caller asserts against this).
        return None
        