from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.utils import load_config, get_device
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import EpochBuffer
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from typing import Union
import os
import json
from copy import deepcopy


class AgentPPO(AgentBase):
    """PPO agent with a clipped surrogate objective.

    Collects on-policy transitions into an epoch buffer and, every
    ``batch_size`` steps, performs ``n_update_per_epoch`` clipped
    policy-gradient updates on the actor and regression updates on the
    critic against discounted reward-to-go targets.
    """

    def __init__(self, actor: Model, critic: Model, config_path: str, writer: SummaryWriter = None,
                 model_dir: str = "") -> None:
        super().__init__(config_path, writer, model_dir)
        # Environment dimensions from the loaded configuration.
        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim

        self.actor = actor
        self.critic = critic

        # On-policy storage: filled step by step, cleared after each epoch.
        self.buffer = EpochBuffer(self.device)
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Sample an action from the current policy for the given state.

        NOTE(review): ``train`` is currently unused — both training and
        evaluation sample stochastically from the policy distribution;
        confirm whether evaluation should act deterministically instead.
        """
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        # Inference only: skip autograd graph construction.
        with torch.no_grad():
            dist = self.actor(state)
            action = dist.sample()
        return action.cpu().numpy()

    def _get_reward_to_go(self, rewards, dones):
        """Compute per-step discounted returns (reward-to-go).

        The running return is reset at episode boundaries (``done``), so
        returns never leak across episodes stored in the same batch.
        """
        reward_to_go = []
        rewards = rewards.detach().cpu().numpy()
        dones = dones.detach().cpu().numpy()
        R = 0.
        # Iterate backwards so each step accumulates its discounted future.
        for rew, done in zip(rewards[::-1], dones[::-1]):
            if done:
                R = 0.
            R = rew + self.config.gamma * R
            reward_to_go.append(R)
        reward_to_go.reverse()
        return reward_to_go

    def learn(self, state: np.ndarray, action: np.ndarray, reward, next_state: np.ndarray, done: bool):
        """Store one transition; run a PPO optimization epoch every ``batch_size`` steps."""
        super().learn(state, action, reward, next_state, done)
        self.buffer.push(state, action, reward, next_state, done)
        if self.step % self.config.batch_size != 0:
            return
        states, actions, rewards, next_states, dones = self.buffer.get()
        self.buffer.clear()

        reward_to_go = self._get_reward_to_go(rewards, dones)
        reward_to_go = torch.tensor(np.array(reward_to_go), dtype=torch.float32, device=self.device)

        # The "old" log-probs belong to the policy that collected the data.
        # They are constant across the update loop, so compute them once up
        # front under no_grad — no need to deepcopy the whole actor and
        # re-run its forward pass every iteration.
        with torch.no_grad():
            old_log_prob = self.actor(states).log_prob(actions)

        # PPO allows several optimization passes over the same trajectory.
        for _ in range(self.config.n_update_per_epoch):
            v = self.critic(states).squeeze()
            advantage = reward_to_go - v
            # Normalize advantages for a stable gradient scale.
            advantage = (advantage - advantage.mean()) / (advantage.std() + 1e-10)
            advantage = advantage.detach()

            dist = self.actor(states)
            log_prob = dist.log_prob(actions)

            # Importance ratio between the current and data-collecting policy.
            ratio = torch.exp(log_prob - old_log_prob).squeeze()
            entropy = dist.entropy().mean()
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1 - self.config.clip_param, 1 + self.config.clip_param) * advantage
            # Clipped surrogate loss plus an entropy bonus for exploration.
            actor_loss = (-torch.min(surr1, surr2)).mean() - self.config.lambda_entropy * entropy

            self.actor.update(actor_loss)

            # Re-evaluate the critic after the actor step and regress it
            # onto the discounted returns.
            v = self.critic(states).squeeze()
            critic_loss = self.critic.criterion(v, reward_to_go)
            self.critic.update(critic_loss)
