from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import create_reply_buffer
from light_rl.utils.utils import load_config, get_device
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import os
import json
from typing import Union
from copy import deepcopy


class AgentDDPG(AgentBase):
    """Deep Deterministic Policy Gradient agent (Lillicrap et al., 2015).

    Maintains an online actor/critic pair plus deep-copied target networks
    that track the online networks via Polyak (soft) updates. Transitions
    are stored in a replay buffer and sampled in mini-batches for updates.
    """

    def __init__(self, actor: Model, critic: Model, config_path: str, writer: SummaryWriter = None,
                 model_dir: str = "") -> None:
        """Build the agent.

        Args:
            actor: deterministic policy network, maps state -> action.
            critic: Q network, maps concat(state, action) -> scalar value.
            config_path: path of the config file consumed by AgentBase and
                the replay-buffer factory.
            writer: optional TensorBoard writer for logging.
            model_dir: directory used by resume_model() to restore weights.
        """
        super().__init__(config_path, writer, model_dir)
        self.actor = actor
        self.critic = critic
        # Targets start as exact copies; learn() moves them toward the
        # online networks with soft_update(tau).
        self.actor_target = deepcopy(actor)
        self.critic_target = deepcopy(critic)

        self.buffer = create_reply_buffer(config_path)

        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim
        self.action_bound = self.full_config.env.action_bound
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Select an action for `state`.

        In training mode, Gaussian exploration noise (sigma from config) is
        added and the result is clipped to [-action_bound, action_bound].
        In evaluation mode the deterministic policy output is returned.

        Returns:
            The chosen action as a numpy array.
        """
        # as_tensor with an explicit float32 dtype avoids a dtype mismatch
        # when the env returns a float64 numpy array (torch models default
        # to float32 parameters).
        state = torch.as_tensor(state, dtype=torch.float32).to(self.device)
        # Pure inference: no autograd graph needed here.
        with torch.no_grad():
            action = self.actor(state)
            if train:
                noise = torch.randn_like(action) * self.config.noise_sigma
                action = torch.clip(action + noise, -self.action_bound, self.action_bound)
        return action.cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Store one transition and, when due, run DDPG update steps.

        Updates start once the buffer holds `start_learn` transitions and
        then run `update_times` mini-batch steps every `update_every`
        environment steps.
        """
        super().learn(state, action, reward, next_state, done)
        # Store the transition; the reward is scaled per config.
        self.buffer.push(state, action, reward * self.config.reward_scale, next_state, done)
        if len(self.buffer) < self.config.start_learn:
            return
        if self.step % self.config.update_every != 0:
            return
        for _ in range(self.config.update_times):
            states, actions, rewards, next_states, dones = self.buffer.sample(self.config.batch_size)
            dones = dones.float()
            rewards = rewards.float()

            # --- Critic update: regress Q(s, a) onto the bootstrapped TD target ---
            q_value = self.critic(torch.cat((states, actions), dim=1)).squeeze()
            # The TD target must be a constant: without no_grad, backprop of
            # the critic loss leaks gradients into the target networks and
            # biases the critic update.
            with torch.no_grad():
                next_actions = self.actor_target(next_states)
                next_q = self.critic_target(torch.cat((next_states, next_actions), dim=1)).squeeze()
                q_target = rewards + (1 - dones) * self.config.gamma * next_q

            critic_loss = self.critic.criterion(q_value, q_target)
            self.critic.update(critic_loss)

            # --- Actor update: maximize Q(s, pi(s)) i.e. minimize -Q ---
            new_actions = self.actor(states)
            policy_loss = -self.critic(torch.cat((states, new_actions), dim=1)).mean()
            self.actor.update(policy_loss)

            # Polyak-average the target networks toward the online networks.
            self.soft_update(self.actor_target, self.actor, self.config.tau)
            self.soft_update(self.critic_target, self.critic, self.config.tau)
