import copy
import math
import re
import time
import numpy as np
import static_locality_schedule.utils as utils
import static_locality_schedule.job as job
import static_locality_schedule.connection_manager as connection_manager
import static_locality_schedule.leaf_resource_manager as leaf_resource_manager
import static_locality_schedule.server_resource_manager as server_resource_manager
import static_locality_schedule.spine_resource_manager as spine_resource_manager

class StaticPlacementer:
    """Statically places jobs onto a two-tier (leaf/spine) GPU cluster.

    GPU, leaf-port and spine-port occupancy is tracked through dedicated
    resource managers; per-job port assignments are recorded so they can be
    released when the job finishes.

    Global id spaces (all derived from ``gpu_num``):
      * GPUs:        ``[0, gpu_num)``
      * leaf ports:  ``[gpu_num, 2*gpu_num)``   (offset by +gpu_num)
      * spine ports: ``[2*gpu_num, 3*gpu_num)`` (offset by +2*gpu_num)
    """

    def __init__(self, spine_switch_num, leaf_switch_num, spine_switch_port_num,
                 leaf_switch_port_num, server_num, oxc_num=32, banned_server_list=None):
        """Build the cluster model.

        Total GPU count is derived from the spine tier
        (spine_switch_num * spine_switch_port_num); ``leaf_switch_port_num``
        and ``oxc_num`` are accepted for interface compatibility but are not
        used by this class.
        """
        # Avoid the shared-mutable-default pitfall.
        banned_server_list = [] if banned_server_list is None else banned_server_list
        self.gpu_num = spine_switch_num * spine_switch_port_num
        self.server_num = server_num
        self.leaf_num = leaf_switch_num
        self.spine_num = spine_switch_num
        self.gpu_per_server = int(self.gpu_num / server_num)
        self.gpu_per_leaf = int(self.gpu_num / self.leaf_num)
        self.port_per_spine = int(self.gpu_num / self.spine_num)
        print("Cluster Info:")
        print(f"server_num: {server_num}")
        print(f"leaf_num: {leaf_switch_num}")
        print(f"gpu_num: {self.gpu_num}")
        self.server_resource_manager_ = server_resource_manager.ServerResourceManager(
            server_num, self.gpu_per_server, self.leaf_num, banned_server_list)
        self.leaf_resource_manager_ = leaf_resource_manager.LeafResourceManager(
            self.leaf_num, self.gpu_per_leaf)
        self.spine_resource_manager_ = spine_resource_manager.SpineSwitchManager(
            self.spine_num, self.port_per_spine)
        self.connection_manager_ = connection_manager.ConnectionManager(
            self.gpu_num, self.server_num, self.leaf_num, self.spine_num)

        # Job bookkeeping: running and completed jobs, keyed by job id.
        self.current_job_list = {}
        self.history_job_list = {}

        # Port occupancy bitmaps: 0 = free, 1 = in use.
        self.leaf_port_list = [[0] * self.gpu_per_leaf for _ in range(self.leaf_num)]
        self.single_spine_port_list = [0] * self.port_per_spine
        # job_id -> [(gpu_global_id, port_global_id), ...]
        self.job_gpu_spine_port_map = {}
        self.job_gpu_leaf_port_map = {}

    def choose_a_leaf_port(self, leaf_id):
        """Claim the first free port on leaf ``leaf_id``.

        Returns the local port index, or -1 if the leaf has no free port.
        """
        for port_id in range(self.gpu_per_leaf):
            if self.leaf_port_list[leaf_id][port_id] == 0:
                self.leaf_port_list[leaf_id][port_id] = 1
                return port_id
        return -1

    def choose_a_spine_port(self, port_id):
        """Claim spine port ``port_id``; it must currently be free."""
        assert self.single_spine_port_list[port_id] == 0
        self.single_spine_port_list[port_id] = 1

    def choose_leaf_spine_port(self, job_id, choose_leaf_port_map, choose_spine_port_num, gpu_list):
        """Claim leaf and spine ports for ``job_id`` and record the GPU<->port pairing.

        choose_leaf_port_map: leaf_id -> number of leaf ports to claim on that leaf.
        choose_spine_port_num: total spine ports to claim (== len(gpu_list)).
        The i-th GPU of ``gpu_list`` is paired with the i-th claimed leaf port
        and its mirrored spine port (one uplink per leaf port).
        """
        chosen_leaf_ports = []
        for leaf_id, port_count in choose_leaf_port_map.items():
            for _ in range(port_count):
                # +gpu_num converts a leaf-local port to the global leaf-port id space.
                chosen_leaf_ports.append(
                    leaf_id * self.gpu_per_leaf + self.choose_a_leaf_port(leaf_id) + self.gpu_num)
        chosen_spine_ports = []
        for idx in range(choose_spine_port_num):
            # Spine-port id mirrors the leaf-port id, shifted up one more gpu_num.
            chosen_spine_ports.append(chosen_leaf_ports[idx] + self.gpu_num)
            self.choose_a_spine_port(chosen_leaf_ports[idx] - self.gpu_num)
        assert len(chosen_leaf_ports) == len(chosen_spine_ports)
        assert len(chosen_leaf_ports) == len(gpu_list)

        self.job_gpu_spine_port_map[job_id] = list(zip(gpu_list, chosen_spine_ports))
        self.job_gpu_leaf_port_map[job_id] = list(zip(gpu_list, chosen_leaf_ports))

    def relase_leaf_spine_port(self, job_id):
        """Release all leaf and spine ports recorded for ``job_id``.

        (Method name keeps its historical misspelling for caller compatibility.)
        """
        for _, spine_port in self.job_gpu_spine_port_map[job_id]:
            spine_idx = spine_port - 2 * self.gpu_num  # undo the global spine offset
            assert self.single_spine_port_list[spine_idx] == 1
            self.single_spine_port_list[spine_idx] = 0

        for _, leaf_port in self.job_gpu_leaf_port_map[job_id]:
            local = leaf_port - self.gpu_num  # undo the global leaf offset
            leaf_id = local // self.gpu_per_leaf
            leaf_local_port = local - leaf_id * self.gpu_per_leaf
            assert self.leaf_port_list[leaf_id][leaf_local_port] == 1
            self.leaf_port_list[leaf_id][leaf_local_port] = 0

        del self.job_gpu_spine_port_map[job_id]
        del self.job_gpu_leaf_port_map[job_id]

    def _log_queue_length(self, queued_jobs, sim_time):
        """Append '<queue length>,<sim_time>' to queue_length.txt."""
        with open('queue_length.txt', 'a') as f:
            f.write(str(len(queued_jobs)))
            f.write(",")
            f.write(str(sim_time))
            f.write("\n")

    def schedule(self, gpu_num, job_id, sim_time, queued_jobs=None):
        """Try to allocate ``gpu_num`` GPUs for ``job_id`` at ``sim_time``.

        Returns ``(success, gpu_list, gpu_leaf_port_pairs, gpu_spine_port_pairs)``.
        A placement that fits under a single leaf needs no cross-leaf links,
        so the last two entries are None in that case.
        """
        # Avoid the shared-mutable-default pitfall.
        queued_jobs = [] if queued_jobs is None else queued_jobs
        print("some job arrive: " + str(job_id) + "," + str(gpu_num))
        new_job = job.Job(job_id)
        chosen_gpu_list = []
        # Case 0: not enough free GPUs in the whole cluster.
        if gpu_num > self.server_resource_manager_.cal_remain_gpu_num():
            print("finish allocation, no resource0")
            return False, None, None, None
        if not self.server_resource_manager_.whether_can_find_valid_server(gpu_num):
            print("finish allocation, no resource1")
            return False, None, None, None

        potentional_leaf_list = []
        # Step 1: find leaves whose servers can host the whole job (locality first).
        for temp_leaf_id in range(self.leaf_num):
            require_server_num = math.ceil(gpu_num / self.gpu_per_server)
            require_gpu_num_in_server = min(self.gpu_per_server, gpu_num)
            valid_server_num = 0
            first_server = int(temp_leaf_id * self.gpu_per_leaf / self.gpu_per_server)
            last_server = int((1 + temp_leaf_id) * self.gpu_per_leaf / self.gpu_per_server)
            for temp_server_id in range(first_server, last_server):
                if self.server_resource_manager_.server_list[temp_server_id].remain_gpu_num() >= require_gpu_num_in_server:
                    valid_server_num += 1
            if valid_server_num >= require_server_num:
                potentional_leaf_list.append(
                    [temp_leaf_id, sum(self.leaf_resource_manager_.leaf_list[temp_leaf_id].leaf_group)])
        # Prefer the least-loaded candidate leaf.
        potentional_leaf_list.sort(key=lambda x: x[1])
        if len(potentional_leaf_list) > 0:
            temp_leaf_id = potentional_leaf_list[0][0]
            # Step 2: pick GPUs on the servers under the chosen leaf, locality-ordered.
            chosen_gpu_list = self.server_resource_manager_.choose_gpu_in_one_leaf(temp_leaf_id, gpu_num)
            self.leaf_resource_manager_.leaf_list[temp_leaf_id].update_leaf_group_with_required_num(gpu_num)
            # Sanity check: every chosen GPU must hang off the selected leaf.
            for output_gpu_index in chosen_gpu_list:
                assert int(output_gpu_index / self.gpu_per_leaf) == temp_leaf_id
            # Record the job (hoisted out of the loop above, where the original
            # re-assigned these identically on every iteration).
            new_job.start_time = sim_time
            new_job.allocated_gpus = chosen_gpu_list
            self.current_job_list[job_id] = new_job
            print("finish allocation1")
            self._log_queue_length(queued_jobs, sim_time)
            return True, chosen_gpu_list, None, None

        # Otherwise the job must span leaves and go through the spine tier.
        self.server_resource_manager_.release_gpu_in_server(chosen_gpu_list)
        self.leaf_resource_manager_.release_group_with_given_gpu_list(chosen_gpu_list)
        choose_group_in_spine_result = self.spine_resource_manager_.choose_group_in_spine(gpu_num)
        if choose_group_in_spine_result[0]:
            choosed_spine_index_list = choose_group_in_spine_result[1]
            # Count fully-empty servers per leaf (hint for the connection manager).
            leaf_remain_empt_server_list = [0] * self.leaf_num
            for temp_server_id in range(self.server_num):
                temp_leaf_id = int(temp_server_id / self.gpu_per_leaf * self.gpu_per_server)
                if self.gpu_per_server in self.server_resource_manager_.server_list[temp_server_id].gpu_group:
                    leaf_remain_empt_server_list[temp_leaf_id] += 1
            chosen_gpu_list = []
            job_allocated_leaf_spine_link = {}
            job_used_spine_port_num_pair = {}
            for temp_spine_index, chosen_spine_id in enumerate(choosed_spine_index_list):
                chosen_group_size = choose_group_in_spine_result[2][temp_spine_index]
                if chosen_group_size != int(gpu_num / len(choosed_spine_index_list)):
                    # Diagnostic: the split across spines is unexpectedly uneven.
                    print("warning: uneven spine group split", choose_group_in_spine_result[2], gpu_num)
                valid, server_occupy_gpuNum_map = self.connection_manager_.find_valid_gpu_for_specific_spine(
                    chosen_group_size, chosen_spine_id,
                    self.server_resource_manager_.return_server_remain_gpuNum_map(),
                    job_allocated_leaf_spine_link, job_used_spine_port_num_pair,
                    leaf_remain_empt_server_list)
                if not valid:
                    # Roll back this spine's reservation before giving up.
                    self.spine_resource_manager_.release_spine_group_with_give_id_and_group(
                        chosen_spine_id, chosen_group_size)
                    print("finish allocation, no resource due to locality3",
                          len(choosed_spine_index_list), gpu_num)
                    self.leaf_resource_manager_.print_remain_leaf_port_num()
                    self.spine_resource_manager_.print_remain_spoine_port_num()
                    self.spine_resource_manager_.print_resource_info()
                    return False, None, None, None
                for server_id in server_occupy_gpuNum_map:
                    if server_occupy_gpuNum_map[server_id] > 0:
                        chosen_gpu_list.extend(
                            self.server_resource_manager_.server_list[server_id].occupy_gpu_with_required_num(
                                server_occupy_gpuNum_map[server_id])[1])
            chosen_leaf_id_num_list = self.leaf_resource_manager_.update_group_with_given_gpu_list(chosen_gpu_list)

            choose_leaf_port_map = {}
            # leaf index -> {spine index -> number of new links from that leaf to that spine}
            temp_leaf_to_spine_map = {}
            for leaf_id, port_num in chosen_leaf_id_num_list:
                choose_leaf_port_map[leaf_id] = port_num
                per_spine = int(port_num / len(choosed_spine_index_list))
                temp_leaf_to_spine_map[leaf_id] = {
                    spine_id: per_spine for spine_id in choosed_spine_index_list}

            new_job.start_time = sim_time
            new_job.allocated_gpus = chosen_gpu_list
            new_job.job_leaf_to_spine_map = temp_leaf_to_spine_map
            new_job.job_allocated_leaf_spine_link = job_allocated_leaf_spine_link
            new_job.used_spine_port_num_pair = job_used_spine_port_num_pair
            self.current_job_list[job_id] = new_job
            print("finish allocation assign whole clos for large job")
            if len(queued_jobs) > 2:
                with open('utility_timeline.txt', 'a') as f1:
                    f1.write(str(1 - self.server_resource_manager_.cal_remain_gpu_num() / self.leaf_num / self.gpu_per_leaf))
                    f1.write(",")
                    f1.write(str(sim_time))
                    f1.write("\n")
            self._log_queue_length(queued_jobs, sim_time)
            if len(chosen_gpu_list) != gpu_num:
                print(len(chosen_gpu_list), gpu_num, len(choosed_spine_index_list))
            assert len(chosen_gpu_list) == gpu_num
            self.choose_leaf_spine_port(job_id, choose_leaf_port_map, gpu_num, chosen_gpu_list)
            return True, chosen_gpu_list, self.job_gpu_leaf_port_map[job_id], self.job_gpu_spine_port_map[job_id]

        # The spine tier could not host the job either.
        print("finish allocation, no resource2 ", gpu_num)
        self.leaf_resource_manager_.print_remain_leaf_port_num()
        self.spine_resource_manager_.print_remain_spoine_port_num()
        self.connection_manager_.print_connection_info()
        return False, None, None, None

    def update_finished_job(self, job_id, sim_time, queued_jobs=None):
        """Release every resource held by finished job ``job_id`` and archive it."""
        # Avoid the shared-mutable-default pitfall.
        queued_jobs = [] if queued_jobs is None else queued_jobs
        # Jobs placed under a single leaf never claimed leaf/spine ports.
        if job_id in self.job_gpu_spine_port_map:
            self.relase_leaf_spine_port(job_id)
        print("some job finish" + str(job_id))
        to_leave_job = copy.deepcopy(self.current_job_list[job_id])
        to_leave_job.finish_time = sim_time
        self.history_job_list[job_id] = to_leave_job
        self.server_resource_manager_.release_gpu_in_server(to_leave_job.allocated_gpus)
        self.leaf_resource_manager_.release_group_with_given_gpu_list(to_leave_job.allocated_gpus)
        # Aggregate per-spine port usage before releasing the spine groups.
        spine_portNum_map = {}
        for leaf_id in to_leave_job.job_allocated_leaf_spine_link:
            for spine_id, used_port_num in to_leave_job.job_allocated_leaf_spine_link[leaf_id].items():
                if used_port_num > 0:
                    spine_portNum_map[spine_id] = spine_portNum_map.get(spine_id, 0) + used_port_num
        for spine_id, port_num in spine_portNum_map.items():
            self.spine_resource_manager_.release_spine_group_with_give_id_and_group(spine_id, port_num)
        self.connection_manager_.release_connection_resource(to_leave_job.job_allocated_leaf_spine_link)
        del self.current_job_list[job_id]
        self._log_queue_length(queued_jobs, sim_time)

if __name__ == "__main__":
    scheduler = StaticPlacementer(spine_switch_num=1, leaf_switch_num=2, spine_switch_port_num=64, leaf_switch_port_num=32, server_num=8, oxc_num = 32, banned_server_list = [7])
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(4,0,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(4,1,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(8,2,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(8,3,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(8,5,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(8,6,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(8,7,1)
    # print(gpu_index)
    # print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    # print(scheduler.leaf_port_list)
    # print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(4,8,1)
    print(gpu_index)
    print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    print(scheduler.leaf_port_list)
    print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(4,9,1)
    print(gpu_index)
    print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    print(scheduler.leaf_port_list)
    print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    scheduler.update_finished_job(2,2)
    print(scheduler.leaf_port_list)
    print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    scheduler.update_finished_job(7,2)
    print(scheduler.leaf_port_list)
    print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()
    success,gpu_index,job_gpu_leaf_port_map, job_gpu_spine_port_map = scheduler.schedule(16,10,3)
    print(gpu_index)
    print(job_gpu_leaf_port_map, job_gpu_spine_port_map)
    print(scheduler.leaf_port_list)
    print(scheduler.single_spine_port_list)
    scheduler.server_resource_manager_.cal_remain_gpu_num()


                
                