from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from torch import optim

import rl_agent
from base_algo import BaseAlgo
from extragradient import ExtraAdam
from utils import *


class Reinforce(BaseAlgo):
    """Vanilla policy-gradient (REINFORCE) agent.

    Builds an MLP policy network (torso + linear logits head) and updates it
    by gradient ascent on the per-trajectory return. Optionally uses the
    ExtraAdam extragradient optimizer, which alternates an extrapolation
    half-step with a regular step.
    """

    def __init__(self, player_id, num_actions, info_state_size, **agent_specific_kwargs):
        """Create the policy network and its optimizer.

        Args:
            player_id: id of the player this agent controls.
            num_actions: size of the (full) action space.
            info_state_size: dimensionality of the info-state vector.
            **agent_specific_kwargs: forwarded to BaseAlgo (expected to set
                self.device, self.hidden_layers_sizes, self.learn_rate,
                self.is_extra_grad_method, ... — defined in the base class).
        """
        super().__init__(player_id, num_actions, info_state_size, **agent_specific_kwargs)

        # Policy network: shared MLP torso followed by a linear logits head
        # (no activation on the head — raw logits feed log_softmax later).
        self._net_torso = MLPTorso(self.info_state_size, self.hidden_layers_sizes)
        self._policy_logits_layer = SonnetLinear(
            self.hidden_layers_sizes[-1], self.num_actions, activate_relu=False)
        self.pi_network = nn.Sequential(self._net_torso, self._policy_logits_layer).to(self.device)

        optimizer_cls = ExtraAdam if self.is_extra_grad_method else optim.Adam
        self.pi_optimizer = optimizer_cls(self.pi_network.parameters(), lr=self.learn_rate)

    def get_loss(self, rewards, trajectories):
        """Return the negative REINFORCE objective over a batch of trajectories.

        Args:
            rewards: indexable as rewards[i][self.player_id] — scalar return
                of trajectory i for this player.
            trajectories: list of dicts with keys "info_state" (list of
                info-state vectors), "legal_actions" (list of legal-action-id
                lists) and "actions" (list of taken action ids).

        Returns:
            A 0-dim tensor on self.device: minus the mean, over trajectories,
            of (sum of log pi(a_t | s_t)) * return.
        """
        losses = []
        for i, trajectory in enumerate(trajectories):
            info_states = torch.tensor(
                trajectory["info_state"], dtype=torch.float).to(self.device)
            legal_actions = trajectory["legal_actions"]
            actions = trajectory["actions"]
            # Keep the (0-dim) return on the same device as the network
            # output; the original left it on the CPU and relied on scalar
            # cross-device promotion.
            reward = torch.tensor(
                rewards[i][self.player_id], dtype=torch.float, device=self.device)

            policy_logits = self.pi_network(info_states)
            log_prob_list = []
            for j, step_logits in enumerate(policy_logits):
                legal = trajectory["legal_actions"][j]
                # Restrict the softmax to this step's legal actions by direct
                # indexing. (The original built a CPU scatter matrix and
                # matrix-multiplied it against device logits — a device
                # mismatch on GPU — and its scatter layout was only correct
                # when the legal set was the prefix [0..k-1]; indexing is
                # equivalent in that case and correct for any legal set.)
                legal_idx = torch.tensor(legal, dtype=torch.long, device=self.device)
                legal_logits = step_logits[legal_idx].unsqueeze(0)
                log_prob = F.log_softmax(legal_logits, dim=1)
                # Look up the taken action by its position within the legal
                # set (identical to indexing by raw action id when the legal
                # set is the prefix [0..k-1]).
                log_prob_list.append(log_prob[0, list(legal).index(actions[j])])

            log_probs = torch.sum(torch.stack(log_prob_list))
            losses.append(log_probs * reward)

        loss = torch.mean(torch.stack(losses))
        # Negate: optimizers minimize, but REINFORCE maximizes expected return
        # (gradient ascent).
        return -loss

    def update(self, rewards, trajectories):
        """Run one optimization step on the policy network.

        Returns:
            The scalar loss value rounded to 4 decimal places.
        """
        loss = self.get_loss(rewards, trajectories)

        self.pi_optimizer.zero_grad()
        loss.backward()

        if self.is_extra_grad_method:
            # Extragradient alternates two phases, tracked by is_update_over:
            # an extrapolation (lookahead) half-step, then a regular step.
            if self.is_update_over:
                self.pi_optimizer.extrapolation()  # first (lookahead) update
            else:
                self.pi_optimizer.step()
                self.update_counter += 1
            self.is_update_over = not self.is_update_over
        else:
            self.pi_optimizer.step()
            self.update_counter += 1

        # .item() is the canonical scalar extraction from a 0-dim tensor
        # (the original used .detach().tolist()).
        return round(loss.item(), 4)

    def step(self, time_step, is_evaluation=False):
        """Select an action for the current time step (no learning here).

        Args:
            time_step: environment step with per-player "info_state" and
                "legal_actions" observations.
            is_evaluation: accepted for interface compatibility; unused.

        Returns:
            rl_agent.StepOutput with the chosen action and its probabilities
            (action=None, probs=[] on a terminal step).
        """
        if time_step.last():
            return rl_agent.StepOutput(action=None, probs=[])

        info_state = time_step.observations["info_state"][self.player_id]
        legal_actions = time_step.observations["legal_actions"][self.player_id]
        action, probs = self.act(info_state, legal_actions)
        return rl_agent.StepOutput(action=action, probs=probs)
