# coding=utf-8
import os
import time
import threading
import static_schedule.static_scheduler as Scheduler
import gpu_port_ip_map as Port_ip_map

# --- Cluster topology constants ----------------------------------------------
GPU_PER_SERVER = 8   # GPUs on each physical server
GPU_NUM = 64         # total GPUs in the testbed
LEAF_NUM = 2         # number of leaf switches
SPINE_NUM = 1        # number of spine switches
# taskid -> running worker Thread; entries are removed by the worker itself
# when its task finishes (see do_a_task_on_real_machine).
TASK_THREADING_MAP = {}
# Monotonically increasing id used to tag every generated ACL command.
current_command_id = 441
# Global GPU ids currently assigned to a running task.
GPU_OCCUPIED_SET = set()
# Preference order of global GPU ids.  NOTE(review): not referenced anywhere
# in this file — presumably consumed by the scheduler or kept for debugging;
# confirm before removing.
GLOBAL_GPU_LIST = [ 
24,25,26,27,28,29,30,31,
32,33,34,35,36,37,38,39,
48,49,50,51,52,53,54,55,
40,41,42,43,44,45,46,47,
16,17,18,19,20,21,22,23,
0,1,2,3,4,5,6,7,
8,9,10,11,12,13,14,15
]

# Server IPs indexed by server id (global_gpu // GPU_PER_SERVER).
SERVER_IP_LIST = [ 
"10.174.216.254",
"10.174.216.255",
"10.174.217.5",
"10.174.217.0",
"10.174.217.1",
"10.174.217.3",
"10.174.217.2",
"10.174.217.4",
]

# NOTE(review): the (1, 2, 8) arguments look like (SPINE_NUM, LEAF_NUM,
# GPU_PER_SERVER) — confirm against static_scheduler's signature.
scheduler = Scheduler.static_scheduler(1, 2, 8)

def init_date():
    """Reset the timing log and synchronize the clock on every machine.

    Truncates time_cost.txt, sets the local system date, then ssh-es into
    each remote training server and sets its date.

    NOTE(review): the local host is set to 2022-1-10 while the remotes get
    2022-11-4 — looks inconsistent; confirm which date is intended.
    """
    # Truncate (or create) the timing log.  The original bound the result of
    # .close() (always None) to an unused name; just open-and-close.
    open("time_cost.txt", 'w').close()
    # Local clock.
    os.system("date -s \"2022-1-10 00:00:00\"  ")
    # Remote clocks.  10.174.217.2 is deliberately absent — it appears to be
    # the local machine (cf. the "lo" network-interface check in
    # do_a_task_on_real_machine); confirm.
    remote_ips = [
        "10.174.216.254",
        "10.174.216.255",
        "10.174.217.0",
        "10.174.217.1",
        "10.174.217.3",
        "10.174.217.4",
        "10.174.217.5",
    ]
    for ip in remote_ips:
        os.system(f'ssh {ip}  " date -s \\"2022-11-4 00:00:00\\" " ')
    

def scheduler_interface(taskid, task_GPUS_num, sim_time):
    """Ask the static scheduler to place a task.

    Parameters: task id, number of GPUs requested, and the (simulated)
    current time passed straight through to the scheduler.

    Returns (success, gpu_index, job_gpu_leaf_port_map,
    job_gpu_spine_port_map); the last three are None when placement fails.
    """
    # The original pre-set `success = True` only to overwrite it on the next
    # line — removed as dead code.
    success, gpu_index, job_gpu_leaf_port_map, job_gpu_spine_port_map = \
        scheduler.schedule(task_GPUS_num, taskid, sim_time)
    if success:
        print(gpu_index)
        return success, gpu_index, job_gpu_leaf_port_map, job_gpu_spine_port_map
    return success, None, None, None


def do_reconfiguration():
    """Apply a full network reconfiguration: OCS, then electrical packet
    switches, then server routing.

    NOTE(review): control_eps() requires (command_list_for_this_task, taskid)
    but is invoked here with no arguments — this would raise TypeError if
    do_reconfiguration() were ever called (its only call site in main() is
    currently commented out).  Confirm intended arguments before enabling.
    """
    control_ocs()
    control_eps()
    control_server()


def control_ocs():
    """Reconfigure the optical circuit switch.  Placeholder — no-op for now."""
    return None


def get_acl_para(gpu_leaf_src_map, gpu_spine_dst_map, gpu_comm_obj_map):
    """Build the ACL command parameter tuples for one task.

    gpu_leaf_src_map:  source GPU id -> leaf output port (global numbering;
                       leaf out-ports start at GPU_NUM)
    gpu_spine_dst_map: dest GPU id   -> spine output port (global numbering;
                       spine out-ports start at 2*GPU_NUM)
    gpu_comm_obj_map:  dest GPU id   -> iterable of GPU ids that send to it

    Returns a list of parameter tuples tagged "H" (HW-leaf, source match),
    "R" (RG-leaf vlan ACL) or "S" (spine, destination match).  Each tuple
    carries a globally unique command id drawn from the module-level
    `current_command_id` counter, which this function advances.

    Fix vs. original: the original built this list but never returned it
    (its caller expects a return value); the unused `src_acl_list` local
    was also dropped.
    """
    comm_para_list_for_this_task = []
    global current_command_id
    for start_gpu, leaf_port in gpu_leaf_src_map.items():
        leaf_outport = leaf_port - GPU_NUM
        gpu_ip = Port_ip_map.find_gpu_ip(start_gpu)
        spine_port_ip = Port_ip_map.find_spine_ip(int(leaf_outport))
        leaf_port_name = Port_ip_map.find_hw_leaf_port(start_gpu % 32)
        if start_gpu >= GPU_NUM // 2:
            # Upper half of the GPUs hangs off the HW leaf.
            comm_para_list_for_this_task.append(
                ("H", gpu_ip, spine_port_ip, leaf_port_name, current_command_id))
        else:
            # Lower half uses the RG leaf, addressed via a per-server vlan.
            start_vlan_id = Port_ip_map.rg_vlan_list[start_gpu // 8]
            comm_para_list_for_this_task.append(
                ("R", "vlan_%s_acl" % start_vlan_id, "%s" % start_vlan_id,
                 gpu_ip, spine_port_ip, "vlan %s" % start_vlan_id,
                 "%s" % current_command_id))
        current_command_id += 1
    for dst_gpu, spine_port in gpu_spine_dst_map.items():
        out_spine_port = spine_port - 2 * GPU_NUM
        dst_gpu_ip = Port_ip_map.find_gpu_ip(dst_gpu)
        if dst_gpu >= GPU_NUM // 2:
            leaf_port_ip = Port_ip_map.find_hw_leaf_ip(out_spine_port % 32)
        else:
            leaf_port_ip = Port_ip_map.find_rg_leaf_ip(out_spine_port % 32)
        # One spine rule per peer that sends to this destination GPU.
        for comm_obj_gpu in gpu_comm_obj_map[dst_gpu]:
            start_spine_port = gpu_spine_dst_map[comm_obj_gpu] - 2 * GPU_NUM
            start_spine_name = Port_ip_map.find_spine_port(start_spine_port)
            comm_para_list_for_this_task.append(
                ("S", dst_gpu_ip, leaf_port_ip, start_spine_name,
                 current_command_id))
            current_command_id += 1
    return comm_para_list_for_this_task




def control_eps(command_list_for_this_task, taskid):
    """Emit (currently just print) the switch ACL commands for one task.

    Each element of command_list_for_this_task is a tuple produced by
    get_acl_para(): the type tag ("H"/"S"/"R") first, the command id last.

    Fix vs. original: the aclId argument referenced an undefined name
    `CURRENT_COMMAND_ID` (NameError at runtime); the per-command id is
    element [4] of the "H"/"S" tuples.
    """
    for params in command_list_for_this_task:
        if params[0] == "H":
            # HW leaf: match on source IP.
            cmd = f'python3 switch_control/acl_hw.py --task_id {taskid} --inport {params[3]} --source {params[1]} --destination None --outport {params[2]} --aclId {params[4]}'
        elif params[0] == "S":
            # Spine: match on destination IP.
            cmd = f'python3 switch_control/acl_hw.py --task_id {taskid} --inport {params[3]} --source None --destination {params[1]} --outport {params[2]} --aclId {params[4]}'
        else:
            # RG leaf: vlan-scoped ACL map.
            cmd = f'python3 switch_control/acl_control_rg.py  --map_name {params[1]} --map_id {params[2]} --src_ip {params[3]} --rdc_ip {params[4]} --int {params[5]} --acl_num {params[6]}'
        print(cmd)
        #os.system(cmd)  

def control_server():
    """Update server-side routing.  Placeholder — no-op for now."""
    return None


def do_a_task_on_real_machine(taskid, task_GPUS_num, batch_size, global_GPU_list):
    """Launch one training task via horovodrun and block until it exits.

    Intended to run inside a worker thread: marks the assigned GPUs busy in
    GPU_OCCUPIED_SET, builds and runs the horovodrun command, and on
    completion releases the GPUs, logs the assignment to gpu_chosen.txt,
    notifies the scheduler, and removes itself from TASK_THREADING_MAP.

    Parameters: task id, number of GPUs, per-GPU batch size, and the list of
    global GPU ids chosen by the scheduler.
    """
    server_gpu_num_map = {}   # server id -> number of GPUs used on that server
    use_local_gpus = []       # server-local GPU indices, remapped contiguously
    cur_used_server_num = 0
    for gpu in global_GPU_list:
        # Mark the GPU busy in the module-wide occupancy set.
        GPU_OCCUPIED_SET.add(gpu)
        server_id = gpu // GPU_PER_SERVER
        if server_id not in server_gpu_num_map:
            cur_used_server_num += 1
            server_gpu_num_map[server_id] = 0
        server_gpu_num_map[server_id] += 1
        # Remap the global id to a local slot, packing servers in the order
        # they were first seen.
        use_local_gpus.append(gpu - server_id * GPU_PER_SERVER
                              + (cur_used_server_num - 1) * GPU_PER_SERVER)

    # "ip:count,ip:count,..." host list for horovodrun -H.
    server_cmd = ",".join(f"{SERVER_IP_LIST[sid]}:{num}"
                          for sid, num in server_gpu_num_map.items())
    use_local_gpus_str = ",".join(str(g) for g in use_local_gpus)

    # NOTE(review): stripping the last two characters only recovers the bare
    # IP for a single-server task with a single-digit GPU count; multi-server
    # host lists never match, so they always use "eno1".  Confirm this is the
    # intended "task runs locally on 10.174.217.2 -> loopback" check.
    temp_host_ip = server_cmd[:len(server_cmd) - 2]
    iface = "lo" if temp_host_ip == "10.174.217.2" else "eno1"

    cmd = (f'HOROVOD_FUSHION_THRESHOLD=1147483648 '
           f'HOROVOD_TIMELINE=./log/task{taskid}.log FUSION_SIZE=0 '
           f'FUSION_THREAD_NUM=0 NCCL_MAX_NCHANNELS=1 NCCL_ALGO=Ring '
           f'horovodrun --verbose --gloo --network-interface "{iface}" '
           f'--log-level DEBUG -np {task_GPUS_num} -H {server_cmd} '
           f'python3 tensorflow2_synthetic_benchmark.py --model VGG16 '
           f'--batch_size {batch_size} --use_global_gpus {use_local_gpus_str} '
           f'--task_id {taskid}')

    print(f'Task{taskid} start:')
    print(cmd)

    # Blocks this worker thread until the horovodrun process exits.
    op = os.popen(cmd)
    ret = op.read()
    print(ret)
    # TODO: if return information is needed, parse it out of `ret`.

    # Release the GPUs back to the pool.
    for gpu in global_GPU_list:
        GPU_OCCUPIED_SET.remove(gpu)

    # Record which GPUs the task ran on (context manager: the original left
    # the handle unclosed on exception).
    with open('gpu_chosen.txt', 'a') as f:
        f.write(str(taskid) + "," + str(global_GPU_list) + "\n")

    print(f'Task{taskid} finished.')
    scheduler.update_finished_job(taskid, time.time(), queued_jobs=[])

    # Deregister so main() can observe completion.
    del TASK_THREADING_MAP[taskid]

def read_task_from_file( ):
    """Load the task trace from task_info.txt.

    Each non-empty line is "taskid,arrival_time,gpu_count,batch_size"
    (all integers).  Returns a list of 4-tuples in file order.

    Fixes vs. original: the file handle was never closed, and a blank
    (e.g. trailing) line crashed on int("").
    """
    task_info = []
    # utf-8-sig tolerates a BOM written by Windows editors.
    with open('task_info.txt', "r", encoding="utf-8-sig") as txt:
        for line in txt:
            line = line.strip()
            if not line:
                continue  # skip blank / whitespace-only lines
            fields = [int(v) for v in line.split(",")]
            task_info.append((fields[0], fields[1], fields[2], fields[3]))
    return task_info


def main():
    """Experiment driver: replay the task trace and run each task.

    Reads the trace (taskid, arrival_time, gpu_count, batch_size) — the
    arrival times must be non-decreasing — and polls once per
    `time_granularity` seconds, dispatching every task whose arrival time
    has passed and that the scheduler can place.  Each placed task runs in
    its own thread; main() then waits until all worker threads have
    deregistered themselves from TASK_THREADING_MAP.
    """
    init_date()
    tasks_list = read_task_from_file()
    base_time = time.time()
    time_granularity = 1
    while tasks_list:
        # Dispatch loop: stop early on the first task that has not yet
        # arrived or that the scheduler cannot place (head-of-line order).
        while tasks_list:
            taskid, arriving_time, task_GPUS_num, batch_size = tasks_list[0]
            if time.time() - base_time < arriving_time:
                break
            success, global_GPU_list, job_gpu_leaf_port_map, job_gpu_spine_port_map = \
                scheduler_interface(taskid, task_GPUS_num, time.time())
            if not success:
                break
            # TODO: push the topology out before launching —
            # get_acl_para(...) / control_eps(...) / do_reconfiguration().
            # Run the task in a thread; it blocks internally until horovod
            # returns and then removes itself from TASK_THREADING_MAP.
            t = threading.Thread(target=do_a_task_on_real_machine,
                                 args=(taskid, task_GPUS_num, batch_size,
                                       global_GPU_list))
            t.start()
            TASK_THREADING_MAP[taskid] = t
            del tasks_list[0]

        time.sleep(time_granularity)

    # Busy-wait for the workers; each removes its own entry when done.
    # (The original re-checked len()==0 after this loop — always true.)
    while TASK_THREADING_MAP:
        time.sleep(1)
    print("All task finished!")

# Run the experiment driver only when executed as a script.
if __name__ == "__main__":
    main()    
