from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.utils import load_config, get_device
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from typing import Union
import os
import json


class AgentA2C(AgentBase):
    """Advantage Actor-Critic (A2C) agent.

    Holds an actor network that maps a state to an action distribution and a
    critic network that estimates state values V(s); both are updated online
    from single transitions in ``learn``.
    """

    def __init__(self, actor: Model, critic: Model, config_path: str, writer: SummaryWriter = None,
                 model_dir: str = "") -> None:
        """Load the configuration, attach the models and resume saved weights.

        Args:
            actor: policy network; called with a state tensor, must return a
                ``torch.distributions.Distribution`` (supports ``sample``,
                ``log_prob`` and ``entropy``).
            critic: value network; called with a state tensor, returns V(s).
            config_path: path to the experiment configuration file.
            writer: optional TensorBoard ``SummaryWriter`` for logging.
            model_dir: directory used by ``resume_model`` to restore weights.
        """
        super().__init__(config_path, writer, model_dir)
        # Environment dimensions taken from the loaded configuration.
        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim

        self.actor = actor
        self.critic = critic
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Sample an action from the actor's policy distribution for ``state``.

        NOTE(review): ``train`` is currently unused — actions are sampled
        stochastically even at evaluation time. Consider a greedy/mode action
        when ``train`` is False; kept as-is here to preserve behavior.
        """
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        dist = self.actor(state)
        action = dist.sample()
        return action.detach().cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Perform one A2C update step from a single transition.

        The critic is regressed toward the one-step TD target
        ``r + gamma * V(s') * (1 - done)``; the actor is updated with the
        policy-gradient loss weighted by the detached TD advantage, plus an
        entropy bonus that encourages exploration.
        """
        super().learn(state, action, reward, next_state, done)
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        action = torch.tensor(action, dtype=torch.float32, device=self.device)
        next_state = torch.tensor(next_state, dtype=torch.float32, device=self.device)

        # Critic update. The bootstrap target must be treated as a constant:
        # computing V(s') without grad fixes a bug where gradients leaked
        # through the TD target into the critic, biasing its training.
        v = self.critic(state)
        with torch.no_grad():
            v_next = self.critic(next_state)
        v_target = reward + self.config.gamma * v_next * (1 - done)

        critic_loss = self.critic.criterion(v, v_target)
        self.critic.update(critic_loss)

        # Actor update. The advantage (v_target - v) is detached so the policy
        # gradient does not flow into the critic; the entropy term regularizes
        # the policy against premature collapse to a deterministic action.
        dist = self.actor(state)
        action_log_prob = dist.log_prob(action)
        entropy = dist.entropy().mean()

        actor_loss = - action_log_prob * (v_target - v).detach() - self.config.entropy_weight * entropy
        self.actor.update(actor_loss)
