"""Run some games in some scenarios locally.

This is the main entry point of the program.
To test your agent, the main modifications you need to make are:
1. import your own agent class (you know where it is)
2. instantiate your agent and its opponent (replace the DemoAgent below)

The code looks like:

from your_folder.your_file import YourAgent

omega = YourAgent()
gamma = OpponentAgent()  # e.g. DemoAgent, YourAgent, or any other agent
"""
import time

from core.env.train_env import TrainEnv
from examples.demo_agent import DemoAgent
from myAI.my_agent import myAgent
# from my_ai.my_agent import MyAgent as lm_agent

# Team credentials used to register with the training environment.
TEAM, CAPTAIN, CODE = 'LGD.DK', '程恺', 'bb06df'
# Side identifiers used to index each side's observation in `state`.
RED, BLUE, GREEN = 0, 1, -1
# All available scenario IDs (uncomment to play every scenario):
# SCENARIOS = [2010131194, 2010141294, 2010211129, 2010431153, 2010441253, 2030111194, 2030331196, 2030341296]
# 2010131194 -- minimal moderately undulating terrain, encounter battle 1
# 2010141294 -- minimal moderately undulating terrain, encounter battle 2
# 2010211129 -- company-level plateau corridor, control-point battle
# 2010431153 -- company-level paddy-field water network, encounter battle 1
# 2010441253 -- company-level paddy-field water network, encounter battle 2
# 2030111194 -- battalion-level moderately undulating terrain, control-point battle
# 2030331196 -- battalion-level mountain jungle, encounter battle 1
# 2030341296 -- battalion-level mountain jungle, encounter battle 2
SCENARIOS = [2030331196]  # scenarios to play in this run
NUM_GAMES = 2  # number of games in each scenario


def main():
    """Play NUM_GAMES games of each scenario in SCENARIOS and save replays.

    For every scenario, one environment is created and NUM_GAMES games are
    played with `alpha` on the red side and `beta` on the blue side. A replay
    of each game is written to logs/replay/<game_id>.json.
    """
    # Instantiate the two competing agents; swap these to test other matchups.
    alpha = DemoAgent()
    beta = myAgent()

    for scenario in SCENARIOS:
        env = TrainEnv(TEAM, CAPTAIN, CODE, scenario)  # one environment per scenario
        print('Environment is ready.')
        for i in range(NUM_GAMES):
            # perf_counter is monotonic — unlike time.time(), it cannot be
            # skewed by wall-clock (NTP) adjustments while a game is running.
            begin = time.perf_counter()
            game_id = f'{time.strftime("%Y-%m-%d-%H-%M-%S")}_{scenario}_{i}'

            # 'setup()' must be called once before each game:
            # alpha plays as red and beta as blue.
            alpha.setup(scenario, RED)
            beta.setup(scenario, BLUE)
            print('Both agents are ready.')
            state, done = env.reset()  # get the initial state of the environment

            while not done:  # loop until the end of the game
                # each agent chooses its actions from its own side's observation
                alpha_actions = alpha.step(state[RED])
                beta_actions = beta.step(state[BLUE])
                state, done = env.step(alpha_actions + beta_actions)  # environment steps forward

            # 'reset()' must be called once after each game
            alpha.reset()
            beta.reset()
            env.save_replay(game_id)  # save replay to file: logs/replay/game_id.json
            print(f'Total time of {game_id}: {time.perf_counter() - begin:.3f}s')
        print(f'Finish scenario: {scenario}.')
    print('Finish all games.')


# Run only when executed as a script, not when this module is imported.
if __name__ == '__main__':
    main()
