import AI_WeDo.DDPG.DDPG_Main as DDPG
import Env.EnvMain as Env
import math
import numpy as np
import gc
import profile  # 诊断程序运行效率
from concurrent.futures import ProcessPoolExecutor as PPE
import multiprocessing as mp

action_dim=[2,1]    # action dimensions: 2 continuous, 1 discrete
action_bound=10/180*math.pi     # action value range (±10 degrees in radians)
state_dim=5     # input feature dimension
state_dim_2=0       # feature dimension of the second input layer
n_friend=1  # number of friendly units
n_enemy=1   # number of enemy units

def First_step_strategy():
    """Build the opening ("first step") strategy.

    Returns a single-entry strategy list ``[[0, k]]`` where ``k`` is an
    integer drawn uniformly at random from ``0..119``.
    """
    opening_move = np.random.randint(0, 120)
    return [[0, opening_move]]


if __name__=='__main__':
    # NOTE(review): GC is disabled for the whole run and never re-enabled;
    # long trainings rely on process exit to reclaim cyclic garbage.
    gc.disable()

    N_WORKERS = 8  # number of parallel (agent, environment) pairs

    algorithms = []
    Envms = []

    # Build one independent DDPG agent and one matching environment per worker.
    for _ in range(N_WORKERS):
        algorithm = DDPG.ddpg(action_dim, state_dim, action_bound, First_step_strategy(),
                              OUTPUT_GRAPH=False, RENDER=False)
        Envm = Env.ACEnv(n_friend, n_enemy, 最大时间=DDPG.MAX_EP_STEPS)
        algorithms.append(algorithm)
        Envms.append(Envm)

    # Run each agent/environment pair in its own process. Keep the futures
    # and call result() so exceptions raised in worker processes surface
    # here instead of being silently discarded.
    with PPE(max_workers=N_WORKERS) as Pool:
        futures = [Pool.submit(alg.main, env)
                   for alg, env in zip(algorithms, Envms)]
        for future in futures:
            future.result()