from light_rl.agent.agent import AgentBase
from light_rl.model.model import Model
from light_rl.utils.dict_wrapper import DictWrapper
from light_rl.utils.buffer import create_reply_buffer
from light_rl.utils.utils import load_config, get_device
import torch
from torch.utils.tensorboard import SummaryWriter
import numpy as np
import os
import json
from typing import Union
from copy import deepcopy


class AgentDQN(AgentBase):
    """Vanilla DQN agent: epsilon-greedy action selection plus one-step
    TD Q-learning updates sampled from a replay buffer.

    NOTE(review): this variant bootstraps from the online network itself
    (no separate target network); see ``AgentDQN_OffPolicy`` for the
    target-network variant.
    """

    def __init__(self, model: Model, config_path: str, writer: SummaryWriter = None, model_dir: str = "") -> None:
        super().__init__(config_path, writer, model_dir)
        self.model = model
        self.buffer = create_reply_buffer(config_path)
        # Environment dimensions come from the loaded config (set by AgentBase).
        self.state_dim = self.full_config.env.state_dim
        self.action_dim = self.full_config.env.action_dim
        self.resume_model()

    def react(self, state: np.ndarray, train: bool = True) -> Union[np.ndarray, int]:
        """Choose an action for ``state``.

        In evaluation mode (``train=False``) the greedy action is always
        taken; in training mode an epsilon-greedy policy is used, with
        epsilon decayed multiplicatively on every call down to
        ``epsilon_min``.

        Returns a 0-d numpy array (greedy branch) or a plain int
        (exploration branch), matching the original interface.
        """
        # .float() guards against float64 numpy states feeding a float32 net.
        state = torch.as_tensor(state).float().to(self.device)
        if not train:
            with torch.no_grad():
                action = torch.argmax(self.model(state))
            return action.cpu().numpy()

        # Epsilon-greedy exploration with per-step multiplicative decay.
        self.config.epsilon = max(self.config.epsilon * self.config.epsilon_decay, self.config.epsilon_min)
        if np.random.random() < self.config.epsilon:
            # Explore: uniform random action.
            return np.random.randint(0, self.action_dim)
        with torch.no_grad():
            # Exploit: greedy action w.r.t. current Q estimates.
            action = torch.argmax(self.model(state))
        return action.cpu().numpy()

    def learn(self, state, action, reward, next_state, done):
        """Store one transition and, once the buffer holds at least
        ``start_learn`` samples, perform one gradient step on a sampled
        mini-batch.
        """
        super().learn(state, action, reward, next_state, done)
        # Persist the transition into the replay buffer.
        self.buffer.push(state, action, reward, next_state, done)
        if len(self.buffer) < self.config.start_learn:
            return  # not enough experience collected yet

        states, actions, rewards, next_states, dones = self.buffer.sample(self.config.batch_size)
        dones = dones.float()
        rewards = rewards.float()
        # Q(s, a) for the actions actually taken; squeeze(1) keeps the
        # batch dimension intact even when batch_size == 1.
        q_value = self.model(states).gather(1, actions.unsqueeze(1)).squeeze(1)

        # FIX: the bootstrap target must not propagate gradients — the
        # original computed it without no_grad, leaking gradients
        # through the TD target into the network.
        with torch.no_grad():
            next_q_value, _ = torch.max(self.model(next_states), dim=1, keepdim=True)
            q_target = rewards + (1 - dones) * self.config.gamma * next_q_value.squeeze(1)
        loss = self.model.criterion(q_value, q_target)
        self.model.update(loss)


class AgentDQN_OffPolicy(AgentDQN):
    """DQN with a separate target network.

    The target network is a deep copy of the online model, refreshed
    every ``update_target`` steps, and is used only to compute the
    bootstrap targets — the classic stabilization from the DQN paper.
    """

    def __init__(self, model: Model, config_path: str, writer: SummaryWriter = None, model_dir: str = "") -> None:
        super().__init__(model, config_path, writer, model_dir)
        # Frozen copy used for target computation; synced periodically below.
        self.target_model = deepcopy(model)

    def learn(self, state, action, reward, next_state, done):
        # Deliberately skip AgentDQN.learn (which would run its own
        # update) and call the AgentBase bookkeeping directly.
        super(AgentDQN, self).learn(state, action, reward, next_state, done)
        # Persist the transition into the replay buffer.
        self.buffer.push(state, action, reward, next_state, done)
        if len(self.buffer) < self.config.start_learn:
            return  # not enough experience collected yet

        states, actions, rewards, next_states, dones = self.buffer.sample(self.config.batch_size)
        dones = dones.float()
        rewards = rewards.float()
        # Q(s, a) for the actions actually taken; squeeze(1) keeps the
        # batch dimension intact even when batch_size == 1.
        q_value = self.model(states).gather(1, actions.unsqueeze(1)).squeeze(1)
        # FIX: evaluate the target network under no_grad — the original
        # let loss.backward() compute useless gradients into the frozen
        # target network and leak gradients through the TD target.
        with torch.no_grad():
            next_q_value, _ = torch.max(self.target_model(next_states), dim=1, keepdim=True)
            q_target = rewards + (1 - dones) * self.config.gamma * next_q_value.squeeze(1)

        loss = self.model.criterion(q_value, q_target)
        self.model.update(loss)
        # Hard-sync the target network every `update_target` steps.
        if self.step % self.config.update_target == 0:
            self.target_model.load_state_dict(self.model.state_dict())