
import time
from concurrent.futures import ThreadPoolExecutor
from typing import Optional

from Environment import GameEnvironment
from model_manager import ModelManager


def main(environments_count: int, model_dir: str, load_model_path: Optional[str] = None):
    """Run parallel self-play experience collection and DQN training.

    Spawns `environments_count` game environments on a thread pool; each
    worker pushes experience into the shared ModelManager while this thread
    trains on batches drawn from the shared experience pool, checkpointing
    and evaluating periodically.

    Args:
        environments_count: number of parallel GameEnvironment workers.
        model_dir: directory where periodic checkpoints are written.
        load_model_path: optional checkpoint path to resume training from.
    """
    model_manager = ModelManager(
        state_shape=(17, 17, 6),
        action_size=12,
        use_double_dqn=True,
        use_dueling=True,
        use_per=True,
        n_step=8,
        memory_size=20000,
        batch_size=64,
        target_update_freq=50,
        learning_rate=0.001,
        gamma=0.99
    )
    if load_model_path is not None:
        model_manager.load_model(load_model_path)
    model_manager.set_training_state(True)

    thread_pool = ThreadPoolExecutor()
    futures = []
    print('线程池启动')
    for _ in range(environments_count):
        # Each environment collects experience and feeds the shared manager.
        env = GameEnvironment(model_manager)
        futures.append(thread_pool.submit(env.train, 150))

    loss_list = []
    print('等待初始经验上传')
    # Block until the experience pool holds at least one batch worth of data.
    while model_manager.experiencePool.pool.size < model_manager.experiencePool.batch_size:
        time.sleep(0.05)
    print('开始训练')

    batch_count = 0
    # Keep training while any worker is still alive OR the minimum batch
    # budget (8000) has not been reached.  BUGFIX: the original checked
    # f.running(), which is False for futures that are queued but not yet
    # started, so training could stop early when environments_count exceeds
    # the pool's worker count; `not f.done()` covers pending AND running.
    while any(not f.done() for f in futures) or batch_count < 8000:
        loss = model_manager.train_one_batch()
        loss_list.append(loss)
        batch_count += 1
        if batch_count % 1000 == 0:
            # Periodic checkpoint, tagged with batch index and wall-clock time.
            model_manager.save_model(
                f'{model_dir}/ep_{batch_count}_{time.strftime("%m-%d-%H-%M", time.localtime())}.pth')
        if batch_count % 400 == 0:
            # Periodic evaluation on a fresh environment (8 test episodes).
            GameEnvironment(model_manager).test(8)
    # Don't block here; the process still waits for the non-daemon worker
    # threads to finish before exiting.
    thread_pool.shutdown(wait=False)


if __name__ == "__main__":
    # Launch training with 2 parallel environments, resuming from a checkpoint.
    checkpoint_dir = '../../checkpoints'
    resume_from = checkpoint_dir + '/ep_8000_09-15-17-37.pth'
    main(2, checkpoint_dir, resume_from)

