from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import EpochBuffer
from light_rl.utils.utils import load_config, get_device
from torch.utils.tensorboard import SummaryWriter
import torch
import numpy as np
from typing import Union
import os
import json


class AgentREINFORCE(AgentBase):
    """Vanilla REINFORCE (Monte-Carlo policy gradient) agent.

    Collects one full episode in an ``EpochBuffer`` and performs a single
    policy-gradient update when the episode terminates.
    """

    def __init__(self, model: Model, config_path: str, writer: SummaryWriter = None, model_dir: str = "") -> None:
        super().__init__(config_path, writer, model_dir)
        # Environment dimensions come from the loaded configuration file.
        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim

        self.model = model

        # Episode buffer: REINFORCE only updates once per finished episode.
        self.buffer = EpochBuffer(self.device)
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Sample an action from the policy distribution for the given state."""
        obs = torch.tensor(state, dtype=torch.float32, device=self.device)
        policy_dist = self.model(obs)
        sampled = policy_dist.sample()
        return sampled.detach().cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Store the transition; on episode end, run one policy-gradient update."""
        super().learn(state, action, reward, next_state, done)
        self.buffer.push(state, action, reward, next_state, done)
        if not done:
            # Monte-Carlo returns need the whole episode; wait for termination.
            return

        # Discounted return and loss accumulators.
        ret = torch.tensor(0, dtype=torch.float32, device=self.device)
        total_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
        states, actions, rewards, next_states, dones = self.buffer.get()

        # Walk the episode backwards so the return accumulates in O(T).
        for step in reversed(range(len(self.buffer))):
            ret = self.config.gamma * ret + rewards[step]
            step_dist = self.model(states[step])
            step_log_prob = step_dist.log_prob(actions[step])
            # Minimizing -R * log_prob ascends the expected return.
            total_loss = total_loss - ret * step_log_prob
        total_loss = total_loss / len(self.buffer)
        self.model.update(total_loss)
        # Drop the episode's data now that the update is done.
        self.buffer.clear()


class AgentREINFORCEWithBaseLine(AgentBase):
    """REINFORCE with a learned state-value baseline to reduce gradient variance."""

    def __init__(self, policy_model: Model, value_model: Model, config_path: str, writer: SummaryWriter = None,
                 model_dir: str = "") -> None:
        super().__init__(config_path, writer, model_dir)
        # Environment dimensions come from the loaded configuration file.
        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim

        self.policy_model = policy_model
        self.value_model = value_model

        # Episode buffer: the agent updates once per finished episode.
        self.buffer = EpochBuffer(self.device)
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Sample an action from the policy distribution for the given state."""
        state = torch.tensor(state, dtype=torch.float32, device=self.device)
        dist = self.policy_model(state)
        # Sample from the policy's probability distribution.
        action = dist.sample()
        return action.detach().cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Store the transition; on episode end, update both policy and value nets.

        Fixes vs. the original implementation:
        * the value loss is a squared error (V(s) - R)^2 — the raw signed
          error used before has no minimum and drives V(s) toward -inf;
        * each log-prob is weighted by its per-step advantage (R - V(s)),
          not by the running sum of value errors, with the minus sign
          required to ascend the expected return by minimizing the loss
          (consistent with AgentREINFORCE).
        """
        super().learn(state, action, reward, next_state, done)
        self.buffer.push(state, action, reward, next_state, done)
        if not done:
            # Monte-Carlo returns need the whole episode; wait for termination.
            return
        states, actions, rewards, next_states, dones = self.buffer.get()

        # Discounted return and loss accumulators.
        R = torch.tensor(0, dtype=torch.float32, device=self.device)
        value_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
        policy_loss = torch.tensor(0, dtype=torch.float32, device=self.device)
        # Walk the episode backwards so the return accumulates in O(T).
        for i in reversed(range(len(self.buffer))):
            R = self.config.gamma * R + rewards[i]
            value = self.value_model(states[i])[0]
            # Value net regresses toward the Monte-Carlo return.
            value_loss = value_loss + (value - R) ** 2
            # Detach the advantage: the baseline must not receive policy gradients.
            advantage = (R - value).detach()
            dist = self.policy_model(states[i])
            log_prob = dist.log_prob(actions[i])
            policy_loss = policy_loss - advantage * log_prob
        policy_loss = policy_loss / len(self.buffer)
        value_loss = value_loss / len(self.buffer)

        self.policy_model.update(policy_loss)
        self.value_model.update(value_loss)

        # Drop the episode's data now that the update is done.
        self.buffer.clear()
