import ptan
import numpy as np

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions import Categorical

HID_SIZE = 128  # width of the LSTM hidden state and the actor/critic head inputs


class ModelPPO(nn.Module):
    """Recurrent actor-critic network for PPO on image observations.

    A small convolutional encoder feeds an LSTM; the LSTM output is shared
    by a policy head (action logits) and a value head (state value).
    """

    def __init__(self, obs_size, act_size):
        """
        :param obs_size: observation shape, (channels, height, width)
        :param act_size: number of discrete actions
        """
        super(ModelPPO, self).__init__()

        self.conv = nn.Sequential(
            nn.Conv2d(obs_size[0], 64, kernel_size=8, stride=4),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=1),
            nn.ReLU()
        )

        conv_out_size = self._get_conv_out(obs_size)
        self.lstm = nn.LSTM(conv_out_size, HID_SIZE)
        # Orthogonal weights / zero biases for the LSTM — keeps early
        # activations well-scaled in recurrent policy-gradient training.
        for name, param in self.lstm.named_parameters():
            if "bias" in name:
                nn.init.constant_(param, 0)
            elif "weight" in name:
                nn.init.orthogonal_(param, 1.0)
        self.action_linear = nn.Linear(HID_SIZE, act_size)
        self.critic_linear = nn.Linear(HID_SIZE, 1)

    def _get_conv_out(self, shape):
        """Return the flattened size of the conv stack's output for input `shape`."""
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def get_states(self, x, lstm_state, done):
        """Encode observations and run them through the LSTM step by step.

        :param x: batch of observations; divided by 255.0 here, so presumably
            raw uint8 pixel frames — confirm against the env wrapper
        :param lstm_state: (h, c) tuple, each of shape
            (num_layers, batch, hidden_size)
        :param done: episode-termination flags, one per step/env; a set flag
            zeroes the recurrent state before that step so episodes do not
            bleed into each other
        :return: (flattened LSTM outputs, final (h, c) state)
        """
        hidden = self.conv(x / 255.0)
        batch_size = lstm_state[0].shape[1]
        # Fold the flat rollout batch into (seq_len, batch, features) so the
        # LSTM can be stepped one time-slice at a time.
        hidden = hidden.reshape((-1, batch_size, self.lstm.input_size))
        done = done.reshape((-1, batch_size))
        new_hidden = []
        for h, d in zip(hidden, done):
            h, lstm_state = self.lstm(
                h.unsqueeze(0),
                (
                    # Reset h/c for any env whose previous step ended an episode.
                    (1.0 - d).view(1, -1, 1) * lstm_state[0],
                    (1.0 - d).view(1, -1, 1) * lstm_state[1],
                ),
            )
            new_hidden += [h]
        new_hidden = torch.flatten(torch.cat(new_hidden), 0, 1)
        return new_hidden, lstm_state

    def get_value(self, x, lstm_state, done):
        """Return only the critic's value estimate for `x`."""
        hidden, _ = self.get_states(x, lstm_state, done)
        return self.critic_linear(hidden)

    def forward(self, x, lstm_state, done, action=None):
        """Compute action, log-prob, entropy, value and the next LSTM state.

        :param action: if given, its log-prob/entropy are evaluated (used when
            re-scoring stored rollout actions); otherwise an action is chosen.
        :return: (action, log_prob, entropy, value, lstm_state)
        """
        hidden, lstm_state = self.get_states(x, lstm_state, done)
        logits = self.action_linear(hidden)
        probs = Categorical(logits=logits)
        if action is None:
            if self.training:
                # BUG FIX: training rollouts must *sample* so PPO keeps
                # exploring. The original had these branches inverted
                # (argmax while training, sampling in eval), which removes
                # all exploration during data collection.
                action = probs.sample()
            else:
                # Greedy, deterministic action for evaluation.
                action = torch.argmax(logits, dim=-1)
        return action, probs.log_prob(action), probs.entropy(), self.critic_linear(hidden), lstm_state

class DQNLstmAgent(ptan.agent.BaseAgent):
    """ptan agent driving a recurrent actor-critic model.

    Keeps the LSTM hidden state between calls so consecutive observations
    from the vectorized environments are processed as one sequence.
    """

    def __init__(self, dqn_model, num_envs, action_selector=ptan.actions.ProbabilityActionSelector(), device="cpu", preprocessor=ptan.agent.default_states_preprocessor):
        '''
        :param dqn_model: the recurrent network being trained (see ModelPPO)
        :param num_envs: number of parallel environments
        :param action_selector: action selector (kept for API compatibility)
        :param device: torch device for states and recurrent state
        :param preprocessor: callable turning raw env states into tensors
        '''

        self.dqn_model = dqn_model
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device
        self.num_envs = num_envs
        self.next_lstm_state = (
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
            torch.zeros(dqn_model.lstm.num_layers, self.num_envs, dqn_model.lstm.hidden_size).to(device),
        )
        # Per-env "episode just ended" flags passed to the model so it can
        # reset the LSTM state. The training loop should refresh this after
        # every env step (assign agent.dones) — all zeros means "no resets".
        self.dones = torch.zeros(self.num_envs).to(device)
        self.cur_value = None
        self.cur_logprob = None

    def initial_state(self):
        """
        Should create initial empty state for the agent. It will be called for the start of the episode
        :return: Anything agent want to remember
        """
        return None

    def clone_next_lstm_state(self):
        """Return a detached-copy snapshot of the (h, c) recurrent state."""
        return (
            self.next_lstm_state[0].clone(),
            self.next_lstm_state[1].clone(),
        )

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        # Run states through the preprocessor, if one was provided.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)
        # BUG FIX: the model's forward signature is (x, lstm_state, done, ...);
        # the original call omitted `done`, which raised TypeError on every
        # invocation. Pass the tracked per-env done flags instead.
        action, self.cur_logprob, _, self.cur_value, self.next_lstm_state = self.dqn_model(
            states, self.next_lstm_state, self.dones)
        # Return a list (not a one-shot zip iterator) so the agent states can
        # be safely iterated or indexed more than once by the caller.
        return action.cpu().numpy(), list(zip(self.cur_logprob.cpu().numpy(), self.cur_value.cpu().numpy()))

    # Persist the recurrent state (and done flags) across checkpoints.
    def save_state_dict(self, checkpoints):
        checkpoints['next_lstm_state'] = self.next_lstm_state
        checkpoints['agent_dones'] = self.dones

    def load_state_dict(self, checkpoints):
        self.next_lstm_state = checkpoints['next_lstm_state']
        # .get keeps compatibility with checkpoints saved before 'agent_dones' existed
        self.dones = checkpoints.get('agent_dones', self.dones)