import einops
import numpy as np
import torch
from agents.SACAgent import SACAgent
from config.cfg import Config
from utils.misc import disable_gradients, hard_update, enable_gradients
import torch.nn.functional as F


class MSACAgent:
    """Multi-agent Soft Actor-Critic (centralized critics, decentralized actors).

    Wraps one ``SACAgent`` per agent. Each agent acts from its own local
    observation, while its critic scores the joint observation/action of
    all agents (centralized training, decentralized execution). A single,
    optionally auto-tuned, entropy temperature ``alpha`` is shared by all
    agents.
    """

    def __init__(self, nagents: int, net_dims: list, state_dim: list, action_dim: list, args: Config):
        """Build the per-agent SAC learners and the shared temperature.

        Args:
            nagents: number of agents.
            net_dims: hidden-layer sizes used for both policy and critic nets.
            state_dim: per-agent observation dimensions (len == nagents).
            action_dim: per-agent action dimensions (len == nagents).
            args: run configuration (device, gamma, tau, alpha, lrs, ...).
        """
        self.args = args
        self.nagents = nagents
        # Joint dimensions fed to every centralized critic.
        self.state_dim_sum = np.array(state_dim).sum()
        self.action_dim_sum = np.array(action_dim).sum()
        self.agents = [
            SACAgent(
                id=i,
                policy_dims=net_dims,
                in_dim=state_dim[i],
                out_dim=action_dim[i],
                critic_dims=net_dims,
                state_dim=self.state_dim_sum,
                action_dim_sum=self.action_dim_sum,
                args=args
            )
            for i in range(nagents)
        ]

        if args.autotune:
            # Automatic entropy tuning. NOTE(review): the target entropy is
            # derived from agent 0 only — assumes homogeneous action spaces.
            self.target_entropy = -action_dim[0]
            self.log_alpha = torch.tensor(
                np.log(args.alpha), dtype=torch.float, requires_grad=True,
                device=args.device)
            self.alpha = self.log_alpha.exp().item()
            self.alpha_optimizer = torch.optim.AdamW(
                [self.log_alpha], lr=args.alpha_lr)
        else:
            self.alpha = args.alpha

    @property
    def policies(self):
        """Per-agent online policy networks."""
        return [a.policy for a in self.agents]

    @property
    def target_policies(self):
        """Per-agent target policy networks."""
        return [a.target_policy for a in self.agents]

    @staticmethod
    def _joint_input(per_agent_obs):
        """Concatenate per-agent observations shaped (agent, batch, dim)
        into the joint critic input of shape (batch, sum of dims).

        Fixes the previous hardcoded ``hstack((x[0], x[1]))``, which only
        worked for exactly two agents; for nagents == 2 this is identical.
        """
        return torch.hstack(tuple(per_agent_obs))

    def step(self, state):
        """Sample (action, log_prob) per agent from its own observation."""
        return [a.step(obs) for a, obs in zip(self.agents, state)]

    def get_probs(self, state):
        """Per-agent log-probabilities of freshly sampled actions."""
        return [a.step(obs)[1] for a, obs in zip(self.agents, state)]

    def take_actions(self, state):
        """Per-agent deterministic/exploratory actions from the online policies."""
        return [a.take_action(obs) for a, obs in zip(self.agents, state)]

    def take_target_actions(self, state):
        """Per-agent actions from the target policies."""
        return [a.take_target_action(obs) for a, obs in zip(self.agents, state)]

    def soft_update_target(self):
        """Polyak-average every target network towards its online network."""
        tau = self.args.tau
        for agent in self.agents:
            for net, target_net in ((agent.critic, agent.target_critic),
                                    (agent.policy, agent.target_policy)):
                for param, target_param in zip(net.parameters(),
                                               target_net.parameters()):
                    target_param.data.copy_(
                        target_param.data * (1.0 - tau) + param.data * tau
                    )

    def get_next_actions_logprobs(self, next_state):
        """Joint next actions and summed log-probs for the TD target.

        Returns:
            next_actions: (batch, action_dim_sum) joint next-action tensor.
            next_logprobs: (batch,) sum of all agents' log-probabilities.
        """
        next_actions = []
        next_logprobs = []
        for a, next_obs in zip(self.agents, next_state):
            action, log_prob = a.step(next_obs)
            next_actions.append(action)
            next_logprobs.append(log_prob)

        # Concatenate every agent's actions and log-densities.
        next_actions = torch.hstack(next_actions)
        next_logprobs = torch.hstack(next_logprobs)

        next_actions = next_actions.reshape(
            self.args.batch_size, self.action_dim_sum)
        # Joint-policy log-prob is the sum of the independent per-agent ones.
        next_logprobs = einops.reduce(next_logprobs.reshape(
            self.args.batch_size, self.nagents),
            "b a -> b ()",
            "sum"
        )
        next_logprobs = next_logprobs.reshape(-1)
        return next_actions, next_logprobs

    def get_new_actions(self, state):
        """Re-sample the joint action (batch, action_dim_sum) for the actor loss."""
        new_actions = [a.step(obs)[0] for a, obs in zip(self.agents, state)]
        new_actions = torch.hstack(new_actions)
        return new_actions.reshape(self.args.batch_size, self.action_dim_sum)

    def calc_td_target(self, agent: SACAgent, rewards, next_state, next_actions, next_logprobs, dones):
        """Soft Bellman target for one agent's centralized critic.

        target = r + (1 - done) * gamma * (min(Q1', Q2') - alpha * logpi')
        """
        # The episode counts as done only when every agent is done.
        dones = einops.reduce(dones, 'b a -> b ()', 'min')

        next_state_input = self._joint_input(next_state)
        q1_next_target, q2_next_target = agent.target_critic(
            next_state_input, next_actions)
        # Clipped double-Q value with the entropy bonus.
        next_value = torch.min(
            q1_next_target, q2_next_target) - self.alpha * next_logprobs
        td_target = rewards.flatten() + (1 - dones.flatten()) * \
            self.args.gamma * next_value.view(-1)
        return td_target

    def update_network(self, sample):
        """One gradient step on every critic, every actor and (optionally) alpha."""
        state, actions, rewards, next_state, dones = sample
        state = torch.tensor(state, dtype=torch.float).to(
            device=self.args.device)
        state = einops.rearrange(state, 'batch agent dim -> agent batch dim')
        actions = torch.tensor(actions, dtype=torch.float).to(
            device=self.args.device)
        rewards = torch.tensor(rewards, dtype=torch.float).to(
            device=self.args.device)
        rewards = einops.rearrange(rewards, "b a -> a b")
        next_state = torch.tensor(next_state, dtype=torch.float).to(
            device=self.args.device)
        next_state = einops.rearrange(
            next_state, 'batch agent dim -> agent batch dim')
        dones = torch.tensor(dones, dtype=torch.float).to(
            device=self.args.device)
        next_actions, next_logprobs = self.get_next_actions_logprobs(
            next_state)

        # The joint observation is loop-invariant; build it once.
        state_input = self._joint_input(state)

        # --- critic update: one centralized critic per agent ---
        for agent_id, agent in enumerate(self.agents):
            with torch.no_grad():
                td_target = self.calc_td_target(
                    agent, rewards[agent_id], next_state, next_actions,
                    next_logprobs, dones)
            q1_value, q2_value = agent.critic(state_input, actions)
            q_loss = (F.mse_loss(q1_value.reshape(-1), td_target)
                      + F.mse_loss(q2_value.reshape(-1), td_target))
            agent.critic_optimizer.zero_grad()
            q_loss.backward()
            agent.critic_optimizer.step()

        new_actions = self.get_new_actions(state)
        # --- actor update ---
        for agent in self.agents:
            q1_value, q2_value = agent.critic(state_input, new_actions)
            # NOTE(review): detaching here stops any gradient flowing from Q
            # into the policies, so the actor loss only trains through the
            # log-prob term — confirm this is the intended scheme.
            min_q_value = torch.min(q1_value, q2_value).detach()

            obs = state[agent.id]
            logprobs = agent.step(obs)[1]
            disable_gradients(agent.critic)
            actor_loss = torch.mean((self.alpha * logprobs) - min_q_value)
            agent.policy_optimizer.zero_grad()
            actor_loss.backward()
            agent.policy_optimizer.step()
            enable_gradients(agent.critic)

        if self.args.autotune:
            with torch.no_grad():
                log_probs = self.get_probs(state)
                log_probs = torch.hstack(log_probs)
                log_probs = einops.reduce(
                    log_probs.reshape(self.args.batch_size, self.nagents),
                    'b a -> b ()',
                    "sum"
                )
            alpha_loss = (-self.log_alpha *
                          (log_probs + self.target_entropy)).mean()
            self.alpha_optimizer.zero_grad()
            alpha_loss.backward()
            self.alpha_optimizer.step()
            # Bug fix: refresh alpha from the optimized log_alpha; previously
            # the auto-tuned temperature never reached the losses.
            self.alpha = self.log_alpha.exp().item()
        self.soft_update_target()
