from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import create_reply_buffer
from light_rl.utils.utils import load_config, get_device
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import os
import json
from typing import Union
from copy import deepcopy


class AgentTD3(AgentBase):
    """Twin Delayed Deep Deterministic policy gradient (TD3) agent.

    Implements the three TD3 ingredients from Fujimoto et al. (2018):
    clipped double-Q learning with twin critics, target-policy smoothing
    noise, and delayed actor / target-network updates.
    """

    def __init__(self, actor: Model, critic1: Model, critic2: Model, config_path: str,
                 writer: SummaryWriter = None, model_dir: str = "") -> None:
        """Build the agent; target networks start as deep copies.

        Args:
            actor: deterministic policy network mapping state -> action.
            critic1: first Q network; takes cat(state, action) along dim 1.
            critic2: second (twin) Q network with the same input layout.
            config_path: path to the config file consumed by the base class
                and by ``create_reply_buffer``.
            writer: optional TensorBoard writer forwarded to the base class.
            model_dir: directory used by ``resume_model`` to restore weights.
        """
        super().__init__(config_path, writer, model_dir)
        self.actor = actor
        self.critic1 = critic1
        self.critic2 = critic2
        # Targets begin as exact copies and then track the online networks
        # via Polyak (soft) updates in learn().
        self.actor_target = deepcopy(actor)
        self.critic1_target = deepcopy(critic1)
        self.critic2_target = deepcopy(critic2)

        self.buffer = create_reply_buffer(config_path)

        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim
        self.action_bound = self.full_config.env.action_bound

        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Select an action for ``state``.

        In training mode Gaussian exploration noise (sigma from config) is
        added and the result is clipped to [-action_bound, action_bound];
        in evaluation mode the deterministic policy output is returned.
        """
        # BUG FIX: cast explicitly to float32 — numpy observations default
        # to float64, which would not match float32 network weights.
        state = torch.as_tensor(state, dtype=torch.float32).to(self.device)
        action = self.actor(state)
        if train:
            noise = torch.randn_like(action) * self.config.noise_sigma
            action = torch.clip(action + noise, -self.action_bound, self.action_bound)
        return action.detach().cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Store one transition and, when due, run TD3 gradient updates.

        Learning is skipped until the buffer holds ``start_learn``
        transitions, then runs every ``update_every`` environment steps,
        performing ``update_times`` iterations per call.
        """
        super().learn(state, action, reward, next_state, done)
        # Persist the transition (with scaled reward) to the replay buffer.
        self.buffer.push(state, action, reward * self.config.reward_scale, next_state, done)
        if len(self.buffer) < self.config.start_learn:
            return
        if self.step % self.config.update_every != 0:
            return
        for j in range(self.config.update_times):
            states, actions, rewards, next_states, dones = self.buffer.sample(self.config.batch_size)
            dones = dones.float()
            rewards = rewards.float()

            # Target-policy smoothing: perturb the target action with
            # clipped Gaussian noise, then clip to the action bounds.
            next_actions = self.actor_target(next_states)
            noise = torch.randn_like(next_actions) * self.config.noise_sigma
            noise = torch.clip(noise, -self.config.noise_clip, self.config.noise_clip)
            next_actions = torch.clip(next_actions + noise, -self.action_bound, self.action_bound)

            # Clipped double-Q target: bootstrap from the minimum of the
            # two target critics to curb over-estimation.
            next_states_actions = torch.cat((next_states, next_actions), dim=1)
            v1 = self.critic1_target(next_states_actions).squeeze()
            v2 = self.critic2_target(next_states_actions).squeeze()
            v_target = rewards + (1 - dones) * self.config.gamma * torch.min(v1, v2)

            # Critic regression towards the (detached) TD target.
            states_actions = torch.cat((states, actions), dim=1)
            v_value1 = self.critic1(states_actions).squeeze()
            v_value2 = self.critic2(states_actions).squeeze()
            v_loss1 = self.critic1.criterion(v_value1, v_target.detach())
            v_loss2 = self.critic2.criterion(v_value2, v_target.detach())
            self.critic1.update(v_loss1)
            self.critic2.update(v_loss2)

            self.soft_update(self.critic1_target, self.critic1, self.config.tau)
            self.soft_update(self.critic2_target, self.critic2, self.config.tau)

            if j % self.config.policy_delay == 0:
                # Delayed policy update: maximize Q1 of the online critic,
                # i.e. minimize its negated mean.
                action = self.actor(states)
                states_actions = torch.cat((states, action), dim=1)
                policy_loss = -self.critic1(states_actions).mean()
                self.actor.update(policy_loss)
                # BUG FIX: the actor target was previously soft-updated on
                # every iteration even when the actor itself was frozen by
                # the policy delay, letting the target drift toward a stale
                # policy. TD3 updates the policy target at the same delayed
                # frequency as the actor.
                self.soft_update(self.actor_target, self.actor, self.config.tau)
