import sys
import gym
import numpy as np
import torch  # torch.cuda.is_available()
from torch import nn

from common import Agent, Logger, Buffer
from copy import deepcopy


class TD3Agent(Agent):
    """Twin Delayed DDPG (TD3) agent.

    Implements the three TD3 ingredients visible in this class:
    clipped double-Q learning (twin critics, min of the two targets),
    delayed policy updates (actor updated every ``policy_delay`` critic
    steps), and target policy smoothing (clipped noise on the target
    action in ``smooth_action``).

    All hyperparameters that were previously hard-coded are now
    keyword-only constructor arguments with identical defaults, so
    existing callers are unaffected.
    """

    def __init__(self, state_space, action_space, *,
                 gamma=0.99, polyak=0.995, lr=1e-3,
                 explore_noise=0.1, target_noise=0.2, noise_clip=0.5,
                 policy_delay=2, updates_per_call=50):
        """Build networks and store hyperparameters.

        Args:
            state_space: observation space (consumed by the ``Agent`` base).
            action_space: action space (consumed by the ``Agent`` base).
            gamma: discount factor for the bootstrap target.
            polyak: target-network interpolation factor (close to 1).
            lr: Adam learning rate for both actor and critics.
            explore_noise: std of Gaussian noise added in ``choose_action``.
            target_noise: std of smoothing noise in ``smooth_action``.
            noise_clip: clip bound for the smoothing noise.
            policy_delay: critic updates per actor/target update.
            updates_per_call: gradient steps performed per ``update`` call.
        """
        super().__init__(state_space, action_space)
        self.gamma = gamma
        self.polyak = polyak
        self.lr = lr
        self.explore_noise = explore_noise
        self.target_noise = target_noise
        self.noise_clip = noise_clip
        self.policy_delay = policy_delay
        self.updates_per_call = updates_per_call
        self.build_actor_critic()

    def build_actor_critic(self):
        """Create the actor, the twin critics, and their optimizers."""
        actor_layer = (self.state_dim,) + (256, 256) + (self.action_dim,)
        self.actor = self.build_network(actor_layer, nn.ReLU, nn.Tanh)
        self.policy_optimizer = torch.optim.Adam(self.actor.parameters(), lr=self.lr)

        critic_layer = (self.state_dim + self.action_dim,) + (256, 256) + (1,)
        self.critic1 = self.build_network(critic_layer, nn.ReLU, nn.Identity)
        self.critic2 = self.build_network(critic_layer, nn.ReLU, nn.Identity)
        # Keep a flat parameter list so both critics share one optimizer
        # and can be frozen/unfrozen together during the actor step.
        self.critic_para = list(self.critic1.parameters()) + list(self.critic2.parameters())
        self.value_optimizer = torch.optim.Adam(self.critic_para, lr=self.lr)

    def get_action(self, state):
        """Return the deterministic policy action, scaled to the action limit."""
        state = torch.as_tensor(state, dtype=torch.float32)
        return self.action_limit * self.actor(state)

    def get_value(self, state, action):
        """Return both critics' Q-estimates for a (state, action) pair."""
        state = torch.as_tensor(state, dtype=torch.float32)
        action = torch.as_tensor(action, dtype=torch.float32)
        state_action = torch.cat([state, action], dim=-1)
        return (torch.squeeze(self.critic1(state_action), dim=-1),
                torch.squeeze(self.critic2(state_action), dim=-1))

    def choose_action(self, state):
        """Exploration action: policy output plus Gaussian noise, clipped."""
        action = self.get_action(state).detach().numpy()
        action += self.explore_noise * np.random.randn(self.action_dim)
        return np.clip(action, -self.action_limit, self.action_limit)

    def smooth_action(self, state):
        """Target-policy smoothing: add clipped noise to the target action."""
        action = self.get_action(state)
        epsilon = torch.clamp(torch.randn_like(action) * self.target_noise,
                              -self.noise_clip, self.noise_clip)
        return torch.clamp(action + epsilon, -self.action_limit, self.action_limit)

    def update(self, buffer, target_agent):
        """Run ``updates_per_call`` TD3 gradient steps from replay samples.

        The critics are updated every step; the actor and the target
        networks are updated every ``policy_delay`` steps.
        """
        for i in range(self.updates_per_call):
            state_bf, action_bf, reward_bf, next_state_bf, done_bf = buffer.sample_batch()

            # Clipped double-Q target: min of the two target critics,
            # evaluated at the smoothed target action; detached so no
            # gradient flows into the target networks.
            action = target_agent.smooth_action(next_state_bf)
            ns_a_value1, ns_a_value2 = target_agent.get_value(next_state_bf, action)
            ns_a_value = torch.min(ns_a_value1, ns_a_value2).detach()
            target_value = reward_bf + self.gamma * (1 - done_bf) * ns_a_value

            s_a_value1, s_a_value2 = self.get_value(state_bf, action_bf)
            value_loss = ((target_value - s_a_value1) ** 2).mean() + ((target_value - s_a_value2) ** 2).mean()
            self.optimize(value_loss, self.value_optimizer)

            if i % self.policy_delay == 0:
                # Freeze critic gradients so the actor step does not
                # accumulate gradients into the critics.
                for para in self.critic_para:
                    para.requires_grad = False
                action = self.get_action(state_bf)
                state_action_value, _ = self.get_value(state_bf, action)
                policy_loss = -state_action_value.mean()
                self.optimize(policy_loss, self.policy_optimizer)

                # Restore critic gradients for the next critic update.
                for para in self.critic_para:
                    para.requires_grad = True
                self.update_target(target_agent)

    def update_target(self, target_agent):
        """Polyak-average this agent's weights into the target agent."""
        with torch.no_grad():
            for para, para_targ in zip(self.actor.parameters(), target_agent.actor.parameters()):
                para_targ.data.mul_(self.polyak)
                para_targ.data.add_((1 - self.polyak) * para.data)

            for para, para_targ in zip(self.critic_para, target_agent.critic_para):
                para_targ.data.mul_(self.polyak)
                para_targ.data.add_((1 - self.polyak) * para.data)


# --- Training loop: TD3 on Pendulum-v1 ---
env = gym.make('Pendulum-v1')  # 'Hopper-v4'
agent = TD3Agent(env.observation_space, env.action_space)
target_agent = deepcopy(agent)
# Target networks are updated only via polyak averaging, never by backprop.
for para in target_agent.actor.parameters():
    para.requires_grad = False
for para in target_agent.critic_para:
    para.requires_grad = False

buffer = Buffer(int(1e6), env.observation_space.shape, env.action_space.shape)
sys.stdout = Logger()  # redirect stdout through the project logger

global_step = 0
for epoch in range(1000):
    state, episode_step, episode_reward, epoch_reward = env.reset()[0], 0, 0, []
    for _ in range(4000):
        episode_step, global_step = episode_step + 1, global_step + 1
        # Pure random exploration for the first 10k steps, then policy + noise.
        action = agent.choose_action(state) if global_step > 10000 else env.action_space.sample()

        # gym>=0.26 step() returns (obs, reward, terminated, truncated, info).
        # Previously `truncated` was discarded, so env time-limit endings never
        # reset the episode. Store only `terminated` in the buffer: truncation
        # is not a real terminal state, so bootstrapping must continue past it.
        next_state, reward, terminated, truncated, _ = env.step(action)
        buffer.store(state, action, reward, next_state, terminated)
        state = next_state
        episode_reward += reward

        # Reset on true termination, env truncation, or the manual step cap.
        if terminated or truncated or episode_step == 1000:
            epoch_reward.append(episode_reward)
            state, episode_step, episode_reward = env.reset()[0], 0, 0

        # Start updating once the buffer has warmed up; batch 50 gradient
        # steps every 50 environment steps.
        if global_step >= 1000 and global_step % 50 == 0:
            agent.update(buffer, target_agent)

    print(f"R:{np.mean(epoch_reward):.3f}")
