import sys
import gym
import numpy as np
import torch  # torch.cuda.is_available()
from torch import nn
from torch.distributions import Normal

from common import Agent, Logger, Buffer
from copy import deepcopy


class SACAgent(Agent):
    """Soft Actor-Critic (SAC) agent: squashed-Gaussian actor + twin Q critics.

    Relies on the `Agent` base class (from `common`) for `state_dim`,
    `action_dim`, `action_limit`, `build_network` and `optimize` —
    assumed helpers; confirm against `common.Agent`.
    """

    # SAC hyper-parameters, previously hard-coded inline in update().
    GAMMA = 0.99        # discount factor
    ALPHA = 0.2         # entropy-regularization temperature
    POLYAK = 0.995      # target-network exponential-averaging coefficient
    LOG_STD_MIN = -20   # clamp bounds keep the policy std numerically sane
    LOG_STD_MAX = 2

    def __init__(self, state_space, action_space):
        super().__init__(state_space, action_space)
        self.build_actor()
        self.build_critic()

    def build_actor(self):
        """Create the policy trunk plus separate mean / log-std output heads."""
        actor_layer = (self.state_dim,) + (256, 256)
        self.actor = self.build_network(actor_layer, nn.ReLU, nn.ReLU)
        self.mu = nn.Linear(256, self.action_dim)
        self.log_std = nn.Linear(256, self.action_dim)
        # One optimizer over trunk + both heads.
        self.actor_para = list(self.actor.parameters())
        self.actor_para += list(self.mu.parameters())
        self.actor_para += list(self.log_std.parameters())
        self.policy_optimizer = torch.optim.Adam(self.actor_para, lr=1e-3)

    def build_critic(self):
        """Create the twin Q-networks used for the clipped double-Q trick."""
        critic_layer = (self.state_dim + self.action_dim,) + (256, 256) + (1,)
        self.critic1 = self.build_network(critic_layer, nn.ReLU, nn.Identity)
        self.critic2 = self.build_network(critic_layer, nn.ReLU, nn.Identity)
        # Single flat parameter list so both critics share one optimizer and
        # can be frozen/unfrozen together during the policy step.
        self.critic_para = list(self.critic1.parameters()) + list(self.critic2.parameters())
        self.value_optimizer = torch.optim.Adam(self.critic_para, lr=1e-3)

    def get_value(self, state, action):
        """Return Q1(s, a) and Q2(s, a), each squeezed to drop the last dim."""
        state = torch.as_tensor(state, dtype=torch.float32)
        action = torch.as_tensor(action, dtype=torch.float32)
        state = torch.cat([state, action], dim=-1)
        return torch.squeeze(self.critic1(state), dim=-1), torch.squeeze(self.critic2(state), dim=-1)

    def get_action(self, state):
        """Sample a pre-squash action via the reparameterization trick.

        Returns the raw (not yet tanh-squashed) sample and the underlying
        Normal distribution, so callers can compute log-probabilities.
        """
        hidden = self.actor(torch.as_tensor(state, dtype=torch.float32))
        mu = self.mu(hidden)
        std = torch.exp(torch.clamp(self.log_std(hidden), self.LOG_STD_MIN, self.LOG_STD_MAX))
        dist = Normal(mu, std)
        return dist.rsample(), dist

    def get_action_prob(self, state):
        """Return the tanh-squashed, limit-scaled action and its log-prob.

        The log-prob includes the tanh change-of-variables correction in the
        numerically stable softplus form (as in OpenAI Spinning Up).
        """
        action, dist = self.get_action(state)
        log_prob = dist.log_prob(action).sum(axis=-1)
        # Fix: sum over the last axis (was axis=1) — consistent with the line
        # above and valid for unbatched 1-D states as well.
        log_prob -= (2 * (np.log(2) - action - nn.functional.softplus(-2 * action))).sum(axis=-1)
        action = self.action_limit * torch.tanh(action)
        return action, log_prob

    def choose_action(self, state):
        """Pick a bounded action for environment interaction (as numpy).

        Fix: squash with tanh before scaling — the original returned
        action_limit * raw Gaussian sample, which can leave the action range
        and disagrees with the squashed policy the critics are trained on.
        """
        action = self.action_limit * torch.tanh(self.get_action(state)[0])
        return action.detach().numpy()

    def update(self, buffer, target_agent):
        """Run 50 SAC gradient steps on minibatches sampled from `buffer`."""
        for _ in range(50):
            state_bf, action_bf, reward_bf, next_state_bf, done_bf = buffer.sample_batch()

            # Critic target: clipped double-Q with entropy bonus; the fresh
            # next action comes from the *current* policy (standard SAC).
            next_action, next_log_prob = self.get_action_prob(next_state_bf)
            ns_a_value1, ns_a_value2 = target_agent.get_value(next_state_bf, next_action)
            ns_a_value = torch.min(ns_a_value1, ns_a_value2).detach()
            target_value = reward_bf + self.GAMMA * (1 - done_bf) * (ns_a_value - self.ALPHA * next_log_prob)

            s_a_value1, s_a_value2 = self.get_value(state_bf, action_bf)
            value_loss = ((target_value - s_a_value1) ** 2).mean() + ((target_value - s_a_value2) ** 2).mean()
            self.optimize(value_loss, self.value_optimizer)

            # Freeze critic gradients so the policy loss trains the actor only.
            for para in self.critic_para:
                para.requires_grad = False
            action, log_prob = self.get_action_prob(state_bf)
            s_a_value1, s_a_value2 = self.get_value(state_bf, action)
            state_action_value = torch.min(s_a_value1, s_a_value2)
            policy_loss = (self.ALPHA * log_prob - state_action_value).mean()
            self.optimize(policy_loss, self.policy_optimizer)

            # Restore critic gradients for the next critic update.
            for para in self.critic_para:
                para.requires_grad = True
            self.update_target(target_agent)

    def update_target(self, target_agent):
        """Polyak-average online parameters into the target networks in place."""
        with torch.no_grad():
            for para, para_targ in zip(self.actor.parameters(), target_agent.actor.parameters()):
                para_targ.data.mul_(self.POLYAK)
                para_targ.data.add_((1 - self.POLYAK) * para.data)

            for para, para_targ in zip(self.critic_para, target_agent.critic_para):
                para_targ.data.mul_(self.POLYAK)
                para_targ.data.add_((1 - self.POLYAK) * para.data)


# --- Setup: environment, online agent, frozen target copy ---
env = gym.make('Pendulum-v1')  # Hopper-v4
agent = SACAgent(env.observation_space, env.action_space)
target_agent = deepcopy(agent)
# Target networks are updated only by Polyak averaging, never by backprop.
for p in target_agent.actor.parameters():
    p.requires_grad = False
for p in target_agent.critic_para:
    p.requires_grad = False

buffer = Buffer(int(1e6), env.observation_space.shape, env.action_space.shape)
sys.stdout = Logger()

# --- Training loop: 1000 epochs of 4000 environment steps each ---
global_step = 0
for epoch in range(1000):
    state = env.reset()[0]
    done = False
    episode_step = 0
    episode_reward = 0
    epoch_reward = []
    for _ in range(4000):
        episode_step += 1
        global_step += 1
        # Warm-up phase: act uniformly at random for the first 10k steps.
        if global_step > 10000:
            action = agent.choose_action(state)
        else:
            action = env.action_space.sample()

        next_state, reward, done, _, _ = env.step(action)
        # Do not treat the artificial 1000-step time limit as a true terminal.
        if episode_step == 1000:
            done = False
        buffer.store(state, action, reward, next_state, done)
        state = next_state
        episode_reward += reward

        # Episode finished (or hit the time limit): log return and restart.
        if done or episode_step == 1000:
            epoch_reward.append(episode_reward)
            state = env.reset()[0]
            done = False
            episode_step = 0
            episode_reward = 0

        # After 1000 seed steps, run an update burst every 50 env steps.
        if global_step >= 1000 and global_step % 50 == 0:
            agent.update(buffer, target_agent)

    print(f"R:{np.mean(epoch_reward):.3f}")
