import os
import time
import threading

# Number of GPUs hosted by each physical server (one scheduling slot per GPU).
GPU_PER_SERVER = 8

# taskid -> worker thread currently running that task; entries are removed
# by the worker itself when its job finishes.
TASK_THREADING_MAP = {}

# Global GPU ids currently assigned to some running task.
GPU_OCCUPIED_SET = set()

# Per-GPU management addresses: subnets 192.168.1-4, host octets .2-.17
# (16 GPUs per subnet x 4 subnets = 64 entries, in subnet-major order).
GPU_IP_LIST = [
    f"192.168.{subnet}.{host}"
    for subnet in range(1, 5)
    for host in range(2, 18)
]

# Host addresses of the 8 GPU servers, indexed by server id
# (server_id = global_gpu_id // GPU_PER_SERVER).
SERVER_IP_LIST = [
    "10.174.216.252",
    "10.174.216.253",
    "10.174.216.250",
    "10.174.216.251",
    "10.174.216.246",
    "10.174.216.248",
    "10.174.216.247",
    "10.174.216.249",
]


IP_LIST = []

def scheduler_interface(taskid, task_GPUS_num):
    """Ask the (placeholder) scheduler for a GPU allocation for one task.

    Args:
        taskid: integer id of the task to place.
        task_GPUS_num: number of GPUs the task needs.

    Returns:
        Tuple (success, global_GPU_list, topology):
        success -- True if the allocation was made, False otherwise.
        global_GPU_list -- assigned global GPU ids (empty list on failure).
        topology -- placeholder, always [] for now (TODO: real topology).
    """
    # TMP: hard-coded starting offsets used while the real scheduler is a TODO.
    start_by_task = {0: 0, 1: 4, 2: 6}

    if taskid in start_by_task:
        start = start_by_task[taskid]
        global_GPU_list = list(range(start, start + task_GPUS_num))
        success = True
    else:
        # Fix: the original raised UnboundLocalError for unknown task ids;
        # report a failed allocation instead so callers can react.
        success = False
        global_GPU_list = []

    topology = []  # TODO: compute and return the network topology
    return success, global_GPU_list, topology


def do_reconfiguration():
    """Apply the new interconnect configuration: OCS, then switches, then servers."""
    for reconfigure_step in (control_ocs, control_eps, control_server):
        reconfigure_step()


def control_ocs():
    """Reconfigure the OCS layer (presumably optical circuit switches — confirm).

    Placeholder: not implemented yet.
    """
    return None


def control_eps():
    """Reconfigure the EPS layer (presumably electrical packet switches — confirm).

    Placeholder: not implemented yet.
    """
    return None


def control_server():
    """Update server-side routing/configuration for the new topology.

    Placeholder: not implemented yet.
    """
    return None


def do_a_task_on_real_machine(taskid, task_GPUS_num, batch_size, global_GPU_list):
    """Launch one training task via horovodrun and block until it finishes.

    Marks the assigned GPUs as occupied for the task's lifetime and always
    releases them (and this task's TASK_THREADING_MAP entry) when the command
    returns. Intended to run inside a worker thread spawned by main().

    Args:
        taskid: integer task id (used for log file names and bookkeeping).
        task_GPUS_num: number of Horovod processes to launch (-np).
        batch_size: per-GPU batch size forwarded to the benchmark script.
        global_GPU_list: global GPU ids assigned by the scheduler.
    """
    # Comma-separated global GPU ids handed to the benchmark script.
    use_global_gpus = ','.join(str(gpu) for gpu in global_GPU_list)

    # Mark GPUs busy (global GPU_OCCUPIED_SET) and count how many of the
    # assigned GPUs live on each server.
    # NOTE(review): GPU_OCCUPIED_SET is mutated from several worker threads
    # without a lock; CPython's GIL keeps single add/discard calls safe, but
    # a threading.Lock would make the intent explicit.
    server_gpu_num_map = {}
    for gpu in global_GPU_list:
        GPU_OCCUPIED_SET.add(gpu)
        server_id = gpu // GPU_PER_SERVER  # global id -> hosting server index
        server_gpu_num_map[server_id] = server_gpu_num_map.get(server_id, 0) + 1

    # horovodrun -H expects "ip:slots,ip:slots,...".
    server_cmd = ','.join(
        f'{SERVER_IP_LIST[server_id]}:{server_used_num}'
        for server_id, server_used_num in server_gpu_num_map.items()
    )

    # NOTE(review): "HOROVOD_FUSHION_THRESHOLD" looks like a typo for
    # "HOROVOD_FUSION_THRESHOLD" — confirm against the (possibly patched)
    # Horovod build before renaming it.
    cmd = f'HOROVOD_FUSHION_THRESHOLD=1147483648 HOROVOD_TIMELINE=./task{taskid}.log FUSION_SIZE=0\
 FUSION_THREAD_NUM=0 NCCL_MAX_NCHANNELS=1 NCCL_ALGO=butterfly\
 horovodrun --verbose --gloo --network-interface "lo" --log-level DEBUG -np {task_GPUS_num} -H {server_cmd}\
 python3 tensorflow2_synthetic_benchmark.py --model ResNet50 --batch_size {batch_size} --use_global_gpus {use_global_gpus}'

    print(f'Task{taskid} start:')
    print(cmd)

    try:
        # Blocks this worker thread until the horovod job exits.
        op = os.popen(cmd)
        ret = op.read()
        op.close()  # fix: the original leaked the pipe
        print(ret)
        # TODO: if return information is needed, parse it out of ret.
    finally:
        # Always release the GPUs and this task's thread-map entry, even if
        # running the command or reading its output raised.
        for gpu in global_GPU_list:
            GPU_OCCUPIED_SET.discard(gpu)  # discard: no KeyError if already freed
        print(f'Task{taskid} finished.')
        TASK_THREADING_MAP.pop(taskid, None)


def main():
    """Poll the task list and launch each task once its arrival time passes.

    Each launched task runs in its own worker thread (blocking on horovodrun);
    the worker removes itself from TASK_THREADING_MAP when it finishes, so the
    final loop here just waits for that map to drain.
    """
    # Tasks to execute. TODO: could also be read from a file.
    # Format: (taskid, arrival time in seconds, GPU count, per-GPU batch size).
    # Arrival times (second field) must be non-decreasing.
    tasks_list = [
        (0, 0, 4, 4),
        (1, 10, 2, 32),
        (2, 10, 2, 32),
    ]

    base_time = time.time()
    time_granularity = 1  # polling interval, seconds

    while tasks_list:
        # Launch every task whose arrival time has already passed.
        while tasks_list:
            taskid, arriving_time, task_GPUS_num, batch_size = tasks_list[0]
            if time.time() - base_time < arriving_time:
                break

            # Ask the scheduler whether the task can be placed, which GPUs it
            # gets, and the resulting topology.
            success, global_GPU_list, topology = scheduler_interface(taskid, task_GPUS_num)
            if not success:
                # Fix: the original ignored `success`. No allocation available
                # right now — retry on the next polling tick.
                break

            # TODO: reconfigure the OCS, switches and server routing.
            do_reconfiguration()

            # Run the task in a worker thread; it blocks until horovod returns.
            t = threading.Thread(
                target=do_a_task_on_real_machine,
                args=(taskid, task_GPUS_num, batch_size, global_GPU_list),
            )
            t.start()
            TASK_THREADING_MAP[taskid] = t
            del tasks_list[0]

        time.sleep(time_granularity)

    # Wait until every worker thread has removed itself from the map.
    # Fix: dropped the original's redundant `if len(...) == 0` check — it was
    # always true immediately after this loop.
    while TASK_THREADING_MAP:
        time.sleep(1)
    print("All task finished!")

# Entry point: run the scheduling loop only when executed as a script.
if __name__ == "__main__":
    main()    
