# -*- coding: utf-8 -*-

import numpy as np

from pettingzoo.utils import random_demo

from algo.value_mixing import ValueMixer

# Registry of policy-network classes, keyed by algorithm name.
# Both "q-mix" and "vdn" are currently backed by the same mixer class.
policy_mapping = {name: ValueMixer for name in ("q-mix", "vdn")}


class Agent:
    """Multi-agent controller that owns a shared value-mixing policy network.

    Wraps a ``ValueMixer``-style network for a fixed roster of agents and
    exposes action selection and training entry points.
    """

    def __init__(self, agent_ls: list, n_obs, n_action, net="q-mix"):
        """
        :param agent_ls: agent name strings, e.g. ['red_0', ..., 'blue_80']
        :param n_obs: per-agent observation space size
        :param n_action: number of discrete actions per agent
        :param net: key into ``policy_mapping`` selecting the policy class
            ("q-mix" or "vdn"); previously ignored, now honored.
        :raises KeyError: if ``net`` is not a registered policy name
        """

        # Agent roster.
        self.agent_ls = agent_ls  # ['red_0', 'red_1', ..., 'blue_80']
        self.n_agent = len(agent_ls)
        self.agent_dict = {agent: i for i, agent in enumerate(self.agent_ls)}  # mapping from str to int.

        # Observation space and action space.
        self.obs_space = n_obs
        self.action_space = n_action  # numpy.int32 or int

        # Policy network for choosing action.
        # Bug fix: the class was hardcoded to ValueMixer, silently ignoring
        # `net`; look it up in policy_mapping instead so new algorithms work.
        policy_cls = policy_mapping[net]
        self.policy = policy_cls(self.n_agent, self.obs_space, self.action_space)

    def choose_action(self, agent_id, obs, rand_action=True):
        """Select one discrete action for a single agent.

        :param agent_id: agent name string, e.g. 'red_0'
        :param obs: the agent's local observation
        :param rand_action: if True, pick uniformly at random (exploration)
        :return: an int action index in ``[0, self.action_space)``
        """
        if rand_action:
            # Bug fix: np.random.sample(n) returns an array of n floats in
            # [0, 1), not a discrete action — draw a random action index.
            return int(np.random.randint(self.action_space))

        # Q-values from the policy network.
        # NOTE(review): eval_policy_net presumably should take (agent_id, obs)
        # per the original inline comment — confirm against ValueMixer before
        # wiring the arguments through.
        q = self.policy.eval_policy_net()  # agent_id, obs

        # Greedy action: index of the maximal Q value.
        return int(np.argmax(q))

    def train(self, replay_buffer):
        """Run one training step of the policy on *replay_buffer*."""
        self.policy.train(replay_buffer)
