# -*- coding: utf-8 -*-

import torch

from model.base_rnn import RNN  # for execute
from model.qmix_net import QMixNet  # for learning
from model.vdn_net import VDNNet  # for learning

from torch.optim import Adam

# Maps the ``mixing_method`` argument of ValueMixer to the mixing-network
# class that combines the per-agent Q values into one total Q value.
mixing_mapping = {
    "q-mix": QMixNet,
    "vdn": VDNNet,
}


class ValueMixer:
    """Multi-agent value-decomposition learner (VDN or QMIX).

    Agents share a recurrent Q network (``RNN``); their per-agent Q values
    are combined into a joint ``Q_tot`` by a mixing network (``QMixNet``
    conditions on the global state, ``VDNNet`` does not).  Separate target
    copies of both networks provide the bootstrap targets.
    """

    def __init__(self, n_agent, obs_space, action_space,
                 hidden_dim=64, optimizer=Adam, lr=0.005, gamma=0.05,
                 use_gpu=False,
                 mixing_method="q-mix",
                 model_dir=None,
                 load_model=None,
                 ):
        """ VDN or Q-MIX

        :param n_agent: number of agents.
        :param obs_space: per-agent observation space (RNN input side).
        :param action_space: per-agent action space (RNN output side).
        :param hidden_dim: RNN hidden-state size (see :meth:`init_hidden`).
        :param optimizer: optimizer class, instantiated over all eval parameters.
        :param lr: learning rate.
        :param gamma: discount factor.  NOTE(review): 0.05 is unusually low for
            a discount factor (0.95-0.99 is typical) -- confirm intent.
        :param use_gpu: move all four networks to CUDA when available.
        :param mixing_method: key into ``mixing_mapping`` ("q-mix" or "vdn").
        :param model_dir: directory for checkpoints (not used yet).
        :param load_model: checkpoint to restore (not used yet).
        """

        self.n_agent = n_agent
        self.obs_space = obs_space
        self.action_space = action_space  # consistent with obs_space naming
        self.action = action_space  # deprecated alias, kept for backward compatibility

        # TODO: widen the RNN input when last actions / agent ids are fed back:
        #   input_shape = obs_shape (+ n_actions if last_action)
        #                           (+ n_agents if reuse_network)

        """ Hidden features for the evaluate and target networks.
            Allocated per batch by init_hidden(num_episode). """
        self.eval_hidden, self.target_hidden = None, None
        self.hidden_dim = hidden_dim

        """ Policy network """
        self.eval_policy_net = RNN(n_agent, obs_space, action_space)
        self.target_policy_net = RNN(n_agent, obs_space, action_space)

        """ Mixing network (QMixNet or VDNNet) """
        self.mixing_method = mixing_method
        self.eval_mixing_net = mixing_mapping[mixing_method]()
        self.target_mixing_net = mixing_mapping[mixing_method]()

        """ Sync the parameters of the target networks with the evaluate networks """
        self.target_policy_net.load_state_dict(self.eval_policy_net.state_dict())
        self.target_mixing_net.load_state_dict(self.eval_mixing_net.state_dict())

        self.use_gpu = use_gpu  # GPU train
        if torch.cuda.is_available() and use_gpu:
            self.eval_policy_net.cuda()
            self.target_policy_net.cuda()
            self.eval_mixing_net.cuda()
            self.target_mixing_net.cuda()

        """ Load trained model """
        # TODO: restore weights from model_dir / load_model when provided.

        """ Evaluation parameters = policy net + mixing net (trained jointly). """
        self.eval_parameter = list(self.eval_mixing_net.parameters()) + list(self.eval_policy_net.parameters())

        """ Optimizer """
        self.lr = lr
        self.gamma = gamma
        self.optimizer = optimizer(self.eval_parameter, lr=self.lr)

    def train(self, replays):
        r"""Run one TD-learning step on a batch of episodes.

            L(\theta) = \sum_i^b [ (y_i^{tot} - Q_{tot} (\tau, u, s; \theta) )^2 ]
            y^{tot} = r + \gamma max_u' Q_{tot} (\tau', u', s', \theta-)

            \theta: evaluate network
            \theta-: target network

        Sampled data is 4-dimensional: (num_episode, num_step, num_agent,
        dim_obs).  Because action selection threads a hidden state through the
        RNN, transitions cannot be sampled i.i.d.; instead whole episodes are
        drawn and all episodes are stepped through at the same position.

        :param replays: mapping with keys 's', 'new_s', 'a', 'r', 'done'
            (e.g. pandas DataFrame columns).
        :return: scalar loss value of this update step.
        """

        s, new_s, a, r, done = replays['s'], replays['new_s'], replays['a'], replays['r'], replays['done']
        # TODO: pad episodes of unequal length and mask the padded steps.

        """ Step 1: Convert replays to torch tensors (float, actions long). """
        # https://stackoverflow.com/questions/50307707/convert-pandas-dataframe-to-pytorch-tensor
        s, new_s, r, done = [torch.tensor(x, dtype=torch.float) for x in [s, new_s, r, done]]
        a = torch.tensor(a, dtype=torch.long)

        if torch.cuda.is_available() and self.use_gpu:
            s, new_s, a, r, done = [x.cuda() for x in [s, new_s, a, r, done]]

        # assumes s is (num_episode, num_step, num_agent, dim_obs) -- TODO confirm
        num_episode, num_step = s.shape[0], s.shape[1]
        self.init_hidden(num_episode)

        """ Step 2: Per-agent Q values, (num_episode, num_step, num_agent, num_action). """
        # NOTE(review): the target net should consume new_s; compute_agent_q_values
        # currently feeds both nets the same input -- revisit once it is completed.
        q_eval_s, q_target_s = self.compute_agent_q_values(s, num_episode, num_step)

        # Q of the action actually taken (eval) and the greedy Q (target).
        # assumes a is (num_episode, num_step, num_agent) -- TODO confirm
        q_eval_chosen = torch.gather(q_eval_s, dim=3, index=a.unsqueeze(3)).squeeze(3)
        q_target_max = q_target_s.max(dim=3)[0]

        """ Step 3: Compute total Q value (Q-Mix or VDN)
                    All agents' Q values => one mixed total Q value.
        """
        if self.mixing_method == "q-mix":
            eval_q_tot = self.eval_mixing_net(q_eval_chosen, s)
            target_q_tot = self.target_mixing_net(q_target_max, new_s)
        else:  # VDN does not need the global state to mix the values.
            eval_q_tot = self.eval_mixing_net(q_eval_chosen)
            target_q_tot = self.target_mixing_net(q_target_max)

        """ Step 4: TD target and loss. """
        # Terminal steps contribute no bootstrap value: factor (1 - done).
        # assumes r/done broadcast against Q_tot -- TODO confirm shapes
        y_tot = r + self.gamma * (1 - done) * target_q_tot
        loss = torch.mean((eval_q_tot - y_tot.detach()) ** 2)

        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item()

    def compute_agent_q_values(self, input, num_episode, num_step):
        """Roll the recurrent eval/target policy nets over an episode batch.

        :param input: per-step network input (observations; appending last
            action and agent id is still TODO).
        :param num_episode: number of episodes in the batch.
        :param num_step: number of transitions per episode.
        :return: (q_eval_s, q_target_s), each of shape
            (num_episode, num_step, num_agent, num_action).
        """

        q_eval_s, q_target_s = [], []

        for step_id in range(num_step):
            # TODO: slice the per-step input, add last action and agent id to
            # obs, and convert to cuda mode if needed.
            q_eval, self.eval_hidden = self.eval_policy_net(input, self.eval_hidden)
            q_target, self.target_hidden = self.target_policy_net(input, self.target_hidden)

            # reshape to (num_episode, num_agent, num_action)
            # todo: see the shape of RNN output.
            q_eval = q_eval.view(num_episode, self.n_agent, -1)
            q_target = q_target.view(num_episode, self.n_agent, -1)

            q_eval_s.append(q_eval)
            q_target_s.append(q_target)

        """ The length of q_eval_s & q_target_s is num_step,
            the shape of their elements is (num_episode, num_agent, num_action).
            Stack along a new time axis to get
            (num_episode, num_step, num_agent, num_action).
        """
        q_eval_s = torch.stack(q_eval_s, dim=1)
        q_target_s = torch.stack(q_target_s, dim=1)

        return q_eval_s, q_target_s

    def init_hidden(self, num_episode):
        """Allocate a fresh eval/target hidden state per (episode, agent)."""
        shape = (num_episode, self.n_agent, self.hidden_dim)
        self.eval_hidden = torch.zeros(shape)
        self.target_hidden = torch.zeros(shape)
        # Keep hidden states on the same device as the networks; otherwise the
        # first RNN call fails with a CPU/CUDA device mismatch.
        if torch.cuda.is_available() and self.use_gpu:
            self.eval_hidden = self.eval_hidden.cuda()
            self.target_hidden = self.target_hidden.cuda()
