import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import ptan


class AtariA2C(nn.Module):
    """Actor-critic network: a small conv stack feeding an LSTM, with
    separate policy (logits) and value heads.

    :param input_shape: observation shape as (channels, height, width)
    :param n_actions: size of the discrete action space (policy head width)
    :param input_size: per-step feature size fed to the LSTM; the flattened
        conv output is reshaped to (batch, -1, input_size)
    """

    def __init__(self, input_shape, n_actions, input_size=1):
        super().__init__()

        # Five 3x3 conv layers; the first four halve the spatial size
        # (stride 2), the last keeps it (stride 1).
        # NOTE: the original code had nn.ELU(True), which passes True as the
        # `alpha` parameter; `inplace=True` is what was intended.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=2, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True),
            nn.Conv2d(32, 32, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(32),
            nn.ELU(inplace=True)
        )

        self.lstm = nn.LSTM(num_layers=1, input_size=input_size, hidden_size=256, batch_first=True)

        # Policy head: action logits (softmax is applied by the agent).
        self.policy = nn.Sequential(
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, n_actions)
        )

        # Value head: scalar state-value estimate.
        self.value = nn.Sequential(
            nn.Linear(256, 256),
            nn.ReLU(),
            nn.Linear(256, 1)
        )

        self.input_size = input_size

    def forward(self, x):
        """Run one forward pass.

        :param x: tuple of (observations, (hx, cx)); observations are
            byte images scaled here to [0, 1], hx/cx are LSTM states of
            shape (num_layers, batch, hidden_size)
        :return: (policy logits, value, (hx, cx)) with updated LSTM state
        """
        inputs, (hx, cx) = x
        fx = inputs.float() / 255.0
        # Flatten conv features into a sequence of `input_size`-wide steps.
        conv_out = self.conv(fx).view(fx.size()[0], -1, self.input_size)
        lstm_out, (hx, cx) = self.lstm(conv_out, (hx, cx))
        # Use only the last timestep's output for the heads.
        x = lstm_out[:, -1, :]
        return self.policy(x), self.value(x), (hx, cx)
    

class PolicyLstmAgent(ptan.agent.BaseAgent):
    """ptan agent wrapping an LSTM actor-critic model: converts observations
    to actions and threads per-environment LSTM state between calls."""

    def __init__(self, dqn_model, num_envs, apply_softmax=False, action_selector=None, device="cpu", preprocessor=ptan.agent.default_states_preprocessor):
        """
        :param dqn_model: the network being trained; must expose `.lstm` and
            accept (states, (hx, cx)) in forward
        :param num_envs: number of parallel environments
        :param apply_softmax: if True, softmax the model logits before the selector
        :param action_selector: maps probabilities to actions; defaults to
            ProbabilityActionSelector (None sentinel avoids a shared mutable default)
        :param device: torch device for model inputs
        :param preprocessor: callable turning raw states into model input
        """
        self.dqn_model = dqn_model
        if action_selector is None:
            action_selector = ptan.actions.ProbabilityActionSelector()
        self.action_selector = action_selector
        self.preprocessor = preprocessor
        self.device = device
        self.num_envs = num_envs
        self.apply_softmax = apply_softmax

    def initial_state(self, env_num=1):
        """Create the zeroed initial LSTM state (hx, cx) for `env_num`
        environments, each of shape (num_layers, env_num, hidden_size)."""
        shape = (self.dqn_model.lstm.num_layers, env_num, self.dqn_model.lstm.hidden_size)
        return (
            np.zeros(shape, dtype=np.float32),
            np.zeros(shape, dtype=np.float32)
        )

    @torch.no_grad()
    def __call__(self, states, agent_states=None):
        """Select actions for a batch of states.

        :param states: batch of raw observations
        :param agent_states: per-environment list of (hx, cx) numpy pairs, or
            None to start from the zero state
        :return: (actions, new per-environment [(hx, cx), ...] states)
        """
        # Build the batched LSTM state. BUGFIX: the original left hx/cx
        # undefined when agent_states was None (and when a caller passed
        # states with preprocessor=None).
        if agent_states is None:
            hx_np, cx_np = self.initial_state(len(states))
            hx = torch.tensor(hx_np).to(device=self.device)
            cx = torch.tensor(cx_np).to(device=self.device)
        else:
            hx = [torch.tensor(agent_state[0]) for agent_state in agent_states]
            cx = [torch.tensor(agent_state[1]) for agent_state in agent_states]
            # Per-env states are concatenated along the batch axis (dim=1).
            hx = torch.cat(hx, dim=1).to(device=self.device)
            cx = torch.cat(cx, dim=1).to(device=self.device)

        # If a preprocessor is defined, convert the raw states with it.
        if self.preprocessor is not None:
            states = self.preprocessor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)

        # Run the model to get action logits and the updated LSTM state.
        logit, _, (hx, cx) = self.dqn_model((states, (hx, cx)))
        # BUGFIX: the original left probs_v undefined when apply_softmax
        # was False; fall back to the raw logits in that case.
        if self.apply_softmax:
            probs_v = F.softmax(logit, dim=1)
        else:
            probs_v = logit
        probs = probs_v.data.cpu().numpy()
        actions = self.action_selector(probs)
        # Split the batched state back into one (hx, cx) pair per environment.
        hx = np.split(hx.cpu().numpy(), probs.shape[0], axis=1)
        cx = np.split(cx.cpu().numpy(), probs.shape[0], axis=1)
        return actions, list(zip(hx, cx))