# encoding:UTF-8
from multiprocessing import cpu_count
import numpy as np
import time
import tensorflow as tf

from network_module.network_module import Policy_Net
from agent_module.agent_module_0 import Agent as Agent_0
from agent_module.agent_module_1 import Agent as Agent_1
from agent_module.agent_module_2 import Agent as Agent_2
from agent_module.agent_module_3 import Agent as Agent_3

# Hyperparameters and command-line flags for the policy-gradient training run.
gamma = 0.99  # discount factor for reward
# batch_size (number of episodes per network update) is now a flag below.
# Runtime variables
total_episodes = 100 * 10000  # total number of episodes to run

# TF 1.x command-line flag definitions (tf.app.flags wraps absl.flags).
flags = tf.app.flags
flags.DEFINE_integer('mode', 0, '并行计算的模式: 0, 1, 2, 3，共4种')  # parallel-computation mode: one of 0, 1, 2, 3 (selects the Agent implementation)
flags.DEFINE_integer('processes_number', 1, '并行的执行进程数:1, 2, 4, 8')  # number of parallel worker processes
flags.DEFINE_integer('batch_size', 50, 'batch_size:1, 5, 10, 30, 50, 100, 300')  # episodes per training batch
flags.DEFINE_integer('envs_number', 256, 'envs_number:4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096')  # number of simultaneous environments
flags.DEFINE_integer('D', 4, '输入层维度')  # input-layer dimensionality
flags.DEFINE_integer('H', 50, '隐藏层维度')  # hidden-layer dimensionality
flags.DEFINE_float('learning_rate', 1e-4, '学习率')  # learning rate
config = flags.FLAGS
D = config.D  # convenience alias for the input dimensionality flag

# Select the Agent implementation matching the requested parallelism mode.
# Each agent module implements a different parallel-execution strategy.
_AGENT_BY_MODE = {
    0: Agent_0,
    1: Agent_1,
    2: Agent_2,
    3: Agent_3,
}
if config.mode not in _AGENT_BY_MODE:
    # Fail fast: the original if-chain left `Agent` unbound for an invalid
    # mode, which only surfaced later as a confusing NameError in __main__.
    raise ValueError('mode must be one of 0, 1, 2, 3; got %r' % config.mode)
Agent = _AGENT_BY_MODE[config.mode]

if __name__ == "__main__":
    # Build the policy network from the parsed flags.
    p_net = Policy_Net(config)

    # The Agent wraps the chosen parallel-execution strategy around the
    # policy network; all sizing comes from the command-line flags.
    agent = Agent(p_net, processes_number=config.processes_number,
                  envs_number=config.envs_number, batch_size=config.batch_size)

    # Train until the episode budget is exhausted or the agent signals stop.
    # Use the module-level constant instead of re-hard-coding 100 * 10000,
    # so the budget cannot silently drift from `total_episodes`.
    start = time.time()
    while agent.episode_number <= total_episodes:  # episode_number: running episode count
        if not agent.step():  # step() returning falsy means the agent wants to stop
            break
    elapsed = time.time() - start
    print(agent.episode_number)
    print(elapsed)
