# -*- coding: utf-8 -*-

import pandas as pd

from agent import Agent
from pettingzoo.magent import battle_v2

""" Data Structure of Replay """


class Simulator:
    """Roll out a policy in a PettingZoo environment and record transitions.

    Each replay record is a list:
        [episode_id, step_id, agent_id, s, a, r, new_s]
    """

    def __init__(self, env, agent):
        """
        :param env: PettingZoo environment — parallel API for
            ``_parallel_env_loop`` / ``generate_replays``, AEC ("normal")
            API for ``_normal_env_loop``.
        :param agent: policy object exposing ``choose_action(agent_id, obs, ...)``.
        """
        self.env = env  # normal mode, or parallel mode.

        # --- Env agents ---
        self.env_agent_ls = self.env.possible_agents
        self.n_obs = self.env.observation_spaces
        # action_spaces is a dict {agent_name: space}; assumes every agent
        # shares the same discrete action count — TODO confirm for other envs.
        self.n_action = list(self.env.action_spaces.values())[0].n
        self.n_agent = len(self.env_agent_ls)
        # Mapping from agent name (str) to a stable integer index.
        self.env_agent_dict = {agent_name: i
                               for i, agent_name in enumerate(self.env_agent_ls)}

        # --- Deep reinforcement learning agent ---
        self.agent = agent

        print("Initialize simulator...")

    def _parallel_env_loop(self, episode_id, num_step):  # For parallel mode
        """Simulate one episode with the parallel API and collect transitions.

        :param episode_id: integer tag stored in every record of this episode.
        :param num_step: maximum number of environment steps to simulate.
        :return: list of [episode_id, step_id, agent_id, s, a, r, s'] records.
        """

        # Step 1: initialize.
        obs = self.env.reset()

        episode_replay = []  # episode_id, step_id, agent_id, s, a, r, s'

        # Step 2: start simulating...
        for step_id in range(num_step):
            # self.env.agents holds the names of all currently-live agents.
            actions = {agent_id: self.agent.choose_action(agent_id=agent_id, obs=obs[agent_id])
                       for agent_id in self.env.agents}
            new_obs, rewards, done_s, infos = self.env.step(actions)

            # Store episode replay. Iterate the agents that actually acted
            # (not self.env.agents, which may already have dropped agents
            # that died during this step — we would lose their terminal
            # transition otherwise).
            for agent_id in actions:
                episode_replay.append([episode_id, step_id, self.env_agent_dict[agent_id],
                                       obs[agent_id], actions[agent_id], rewards[agent_id], new_obs[agent_id]])

            # BUGFIX: advance the observation so the next step acts on the
            # latest state instead of reusing the initial one forever.
            obs = new_obs

            # Stop early once every agent is done (env.agents is emptied);
            # stepping a finished parallel env is undefined.
            if not self.env.agents:
                break

        return episode_replay

    def _normal_env_loop(self, num_step):  # For normal mode
        """Roll out with the AEC (normal) API; transitions are not recorded.

        :param num_step: maximum number of single-agent iterations.
        """
        for agent in self.env.agent_iter(max_iter=num_step):
            obs, reward, done, info = self.env.last()
            # NOTE(review): agent_selection is the current agent's *name*,
            # not an action mask — confirm what choose_action expects here.
            avail_action = self.env.agent_selection
            # BUGFIX: the PettingZoo AEC API requires stepping a finished
            # agent with None, never with a real action.
            if done:
                action = None
            else:
                action = self.agent.choose_action(agent, obs, avail_action)
            self.env.step(action)

    def generate_replays(self, num_episode=1, num_step=10):
        """Run parallel-mode episodes and return the replay as a table.

        :param num_episode: number of episodes to simulate.
        :param num_step: maximum steps per episode.
        :return: pandas DataFrame with columns
            ["episode_id", "step_id", "agent_id", "s", "a", "r", "new_s"].
        """
        replay_buffer = []
        for episode_id in range(num_episode):
            replay_buffer.extend(self._parallel_env_loop(episode_id, num_step))

        # BUGFIX: pass columns to the constructor so an empty buffer still
        # yields a well-formed empty DataFrame instead of raising on a
        # length-mismatched `.columns` assignment.
        return pd.DataFrame(replay_buffer,
                            columns=["episode_id", "step_id", "agent_id", "s", "a", "r", "new_s"])


if __name__ == '__main__':

    # Build a parallel battle environment for a quick smoke run.
    env = battle_v2.parallel_env()
    env.reset()

    # Set up the DRL agent and the episode generator around it.
    drl_agent = Agent(agent_ls=env.possible_agents,
                      n_obs=env.observation_spaces,
                      n_action=list(env.action_spaces.values())[0].n)
    simulator = Simulator(env=env, agent=drl_agent)

    # Generate a single one-step episode and show the resulting replay table.
    replay_df = simulator.generate_replays(num_episode=1, num_step=1)
    print(replay_df)
