import copy
import datetime
import gc
import multiprocessing
import time

from util import *

from CloudSystem import run_once

def generate_default_args():
    """Build the baseline Args object shared by every ablation run.

    Returns a fresh ``Args`` instance (from ``util``) pre-populated with the
    experiment-wide defaults; callers override individual fields per run.
    """
    defaults = {
        "request_num": 10000,
        "out_dir": "output_ablation_v1",
        "bare_metal_node_num": 20,
        "state_resource_percent": 0.3,
        "time_expand": 0.1,
        "print_level": 0,
        "spec_reliability": 0.9999,
    }
    args = Args()
    for field, value in defaults.items():
        setattr(args, field, value)
    return args


def run_once_this(method, priority_queue, state_resource_percent, elastic_deploy, trace_id):
    """Run a single simulation with the given ablation configuration.

    Executed inside a worker process of the multiprocessing pool.

    Args:
        method: deploy/schedule strategy name (e.g. "ours").
        priority_queue: whether the priority-queue component is enabled.
        state_resource_percent: fraction of resources reserved for state.
        elastic_deploy: whether elastic deployment is enabled; when disabled,
            the bare-metal node count is raised from 20 to 200 to compensate.
        trace_id: index of the workload trace to replay.
    """
    print(f"start once {method}, {priority_queue}, {state_resource_percent}, {elastic_deploy}, {trace_id}")
    args = generate_default_args()

    args.deploy_schedule_strategy = method
    args.priority_queue = priority_queue
    args.state_resource_percent = state_resource_percent
    args.elastic_deploy = elastic_deploy
    # Without elastic deployment, many more static nodes are needed.
    args.bare_metal_node_num = 200 if not elastic_deploy else 20
    args.trace_id = trace_id
    # Pass a deep copy so run_once can freely mutate its own Args.
    run_once(copy.deepcopy(args))
    # Reclaim memory promptly inside the long-lived worker process.
    gc.collect()
    print(f"End once {method}, {priority_queue}, {state_resource_percent}, {elastic_deploy}, {trace_id}")
    
    





# Build the list of (method, priority_queue, state_resource_percent,
# elastic_deploy, trace_id) tuples for every ablation configuration.
task_args_list = []
method = "ours"
for elastic_deploy in [False]:
    for trace_id in range(10):
        # Full method: priority queue on, state resources reserved.
        task_args_list.append((method, True, 0.3, elastic_deploy, trace_id))
        # Ablation: no priority queue.
        task_args_list.append((method, False, 0.3, elastic_deploy, trace_id))
        if elastic_deploy:
            # Ablation: no state resource reservation.
            task_args_list.append((method, True, 0, elastic_deploy, trace_id))
            # Ablation: both components disabled.
            task_args_list.append((method, False, 0, elastic_deploy, trace_id))
# Fan the ablation runs out over a pool of worker processes.
with multiprocessing.Pool(processes=10) as pool:
    # starmap would submit everything at once;
    # apply_async lets us submit one argument tuple at a time.
    interval = 0  # seconds between submissions (throttle; currently disabled)
    results = []
    for task_args in task_args_list:
        results.append(pool.apply_async(run_once_this, task_args))
        time.sleep(interval)

    # Block until every run finishes; .get() re-raises worker exceptions.
    for result in results:
        result.get()


print("End all subprocesses")