import ptan
import numpy as np
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils

HID_SIZE = 512  # NOTE(review): not referenced in this chunk — presumably used elsewhere in the project
MM_EMBEDDINGS_DIM = 512  # dimensionality of each token embedding vector
MM_HIDDEN_SIZE = 256  # hidden size of the text-encoding LSTM
MM_MAX_DICT_SIZE = 500  # default vocabulary size for the embedding table

TOKEN_UNK = "#unk"  # out-of-vocabulary placeholder token (not referenced in this chunk)

class ModelActor(nn.Module):
    """Actor (policy) network for a multimodal observation: an image plus a
    packed sequence of token indices.

    Produces the mean of a Gaussian action distribution squashed to [-1, 1]
    by tanh; the state-independent log standard deviation is a learned
    parameter (``self.logstd``) used by the agent for exploration.
    """

    def __init__(self, obs_size, act_size, max_dict_size=MM_MAX_DICT_SIZE):
        """
        :param obs_size: observation image shape, (height, width, channels)
        :param act_size: dimensionality of the continuous action vector
        :param max_dict_size: vocabulary size of the text embedding table
        """
        super(ModelActor, self).__init__()

        # Observations arrive HWC; Conv2d expects CHW.
        input_shape = (obs_size[2], obs_size[0], obs_size[1])
        # Convolutional feature extractor for the image part of the observation.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=2),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(256, 512, kernel_size=3, stride=2),
            nn.ReLU(),
        )

        # Token embedding + LSTM encoder for the text part of the observation.
        self.emb = nn.Embedding(max_dict_size, MM_EMBEDDINGS_DIM)
        self.rnn = nn.LSTM(MM_EMBEDDINGS_DIM, MM_HIDDEN_SIZE, batch_first=True)

        conv_out_size = self._get_conv_out(input_shape)

        # Learned log-std of the action distribution.  It is updated by
        # ordinary gradient descent: it participates in the policy/entropy
        # loss terms, so the optimizer adjusts it alongside the other weights.
        self.logstd = nn.Parameter(torch.zeros(act_size))

        # Policy head.  MM_HIDDEN_SIZE * 2 because _concat_features flattens
        # and concatenates BOTH the LSTM hidden state h_n and cell state c_n.
        self.mu = nn.Sequential(
            nn.Linear(conv_out_size + MM_HIDDEN_SIZE * 2, act_size),
            nn.Tanh()
        )

    def _get_conv_out(self, shape):
        """Return the flattened output size of ``self.conv`` for input ``shape``."""
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def _concat_features(self, img_out, rnn_hidden):
        """Concatenate flattened image features with the RNN final state.

        :param img_out: (batch, conv_out_size) image feature tensor
        :param rnn_hidden: LSTM final state — a ``(h_n, c_n)`` tuple for LSTM,
            or a single tensor for RNN/GRU cells; flattened per batch item
        :return: (batch, conv_out_size + flattened_state_size) tensor
        """
        batch_size = img_out.size(0)
        if isinstance(rnn_hidden, tuple):
            # nn.LSTM returns (h_n, c_n); flatten both to (batch, -1).
            flat_h = [t.view(batch_size, -1) for t in rnn_hidden]
            rnn_h = torch.cat(flat_h, dim=1)
        else:
            rnn_h = rnn_hidden.view(batch_size, -1)
        return torch.cat((img_out, rnn_h), dim=1)

    def forward(self, x):
        """
        :param x: tuple (image batch tensor, PackedSequence of token indices)
        :return: (batch, act_size) tensor of action means in [-1, 1]
        """
        x_img, x_text = x
        assert isinstance(x_text, rnn_utils.PackedSequence)

        emb_out = self.emb(x_text.data)
        # Re-wrap the embedded data as a PackedSequence, preserving the
        # sorted/unsorted index metadata.  Dropping it (as the original code
        # did) silently returns the LSTM state in SORTED batch order, which is
        # wrong for sequences packed with enforce_sorted=False.
        emb_out_sq = rnn_utils.PackedSequence(
            emb_out, x_text.batch_sizes,
            sorted_indices=x_text.sorted_indices,
            unsorted_indices=x_text.unsorted_indices)
        _, rnn_h = self.rnn(emb_out_sq)

        conv_out = self.conv(x_img).view(x_img.size(0), -1)
        # Fuse image and text features, then predict the action means.
        feats = self._concat_features(conv_out, rnn_h)
        return self.mu(feats)

class ModelCritic(nn.Module):
    """Critic (value) network for the same multimodal observation as
    ModelActor (image + packed text sequence).

    Estimates the scalar state value V(s) used by actor-critic training
    (e.g. as the baseline in PPO/A2C-style advantage estimation).
    """

    def __init__(self, obs_size, max_dict_size=MM_MAX_DICT_SIZE):
        """
        :param obs_size: observation image shape, (height, width, channels)
        :param max_dict_size: vocabulary size of the text embedding table
        """
        super(ModelCritic, self).__init__()

        # Observations arrive HWC; Conv2d expects CHW.
        input_shape = (obs_size[2], obs_size[0], obs_size[1])
        # Convolutional feature extractor for the image part of the observation.
        self.conv = nn.Sequential(
            nn.Conv2d(input_shape[0], 32, kernel_size=8, stride=2),
            nn.ReLU(),
            nn.Conv2d(32, 64, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(64, 128, kernel_size=4, stride=2),
            nn.ReLU(),
            nn.Conv2d(128, 256, kernel_size=3, stride=2),
            nn.ReLU(),
            nn.Conv2d(256, 512, kernel_size=3, stride=2),
            nn.ReLU(),
        )

        # Token embedding + LSTM encoder for the text part of the observation.
        self.emb = nn.Embedding(max_dict_size, MM_EMBEDDINGS_DIM)
        self.rnn = nn.LSTM(MM_EMBEDDINGS_DIM, MM_HIDDEN_SIZE, batch_first=True)

        conv_out_size = self._get_conv_out(input_shape)

        # Value head.  MM_HIDDEN_SIZE * 2 because _concat_features flattens
        # and concatenates BOTH the LSTM hidden state h_n and cell state c_n.
        self.value = nn.Sequential(
            nn.Linear(conv_out_size + MM_HIDDEN_SIZE * 2, 1)
        )

    def _get_conv_out(self, shape):
        """Return the flattened output size of ``self.conv`` for input ``shape``."""
        o = self.conv(torch.zeros(1, *shape))
        return int(np.prod(o.size()))

    def _concat_features(self, img_out, rnn_hidden):
        """Concatenate flattened image features with the RNN final state.

        :param img_out: (batch, conv_out_size) image feature tensor
        :param rnn_hidden: LSTM final state — a ``(h_n, c_n)`` tuple for LSTM,
            or a single tensor for RNN/GRU cells; flattened per batch item
        :return: (batch, conv_out_size + flattened_state_size) tensor
        """
        batch_size = img_out.size(0)
        if isinstance(rnn_hidden, tuple):
            # nn.LSTM returns (h_n, c_n); flatten both to (batch, -1).
            flat_h = [t.view(batch_size, -1) for t in rnn_hidden]
            rnn_h = torch.cat(flat_h, dim=1)
        else:
            rnn_h = rnn_hidden.view(batch_size, -1)
        return torch.cat((img_out, rnn_h), dim=1)

    def forward(self, x):
        """
        :param x: tuple (image batch tensor, PackedSequence of token indices)
        :return: (batch, 1) tensor of state-value estimates
        """
        x_img, x_text = x
        assert isinstance(x_text, rnn_utils.PackedSequence)

        emb_out = self.emb(x_text.data)
        # Re-wrap the embedded data as a PackedSequence, preserving the
        # sorted/unsorted index metadata.  Dropping it (as the original code
        # did) silently returns the LSTM state in SORTED batch order, which is
        # wrong for sequences packed with enforce_sorted=False.
        emb_out_sq = rnn_utils.PackedSequence(
            emb_out, x_text.batch_sizes,
            sorted_indices=x_text.sorted_indices,
            unsorted_indices=x_text.unsorted_indices)
        _, rnn_h = self.rnn(emb_out_sq)

        conv_out = self.conv(x_img).view(x_img.size(0), -1)
        # Fuse image and text features, then predict the state value.
        feats = self._concat_features(conv_out, rnn_h)
        return self.value(feats)


class AgentA2C(ptan.agent.BaseAgent):
    """ptan agent that turns actor-network outputs into exploratory actions.

    Samples actions from a Gaussian centered on the network's predicted
    means with standard deviation exp(net.logstd), then clips to [-1, 1].
    """

    def __init__(self, net, device="cpu", preprocessor=ptan.agent.float32_preprocessor):
        """
        :param net: actor network exposing forward(states) -> means and a
            ``logstd`` parameter
        :param device: torch device the network lives on
        :param preprocessor: callable converting raw env states to tensors
            (pass None to skip preprocessing)
        """
        self.net = net
        self.device = device
        self.processor = preprocessor

    def __call__(self, states, agent_states):
        """
        :param states: batch of environment observations
        :param agent_states: per-agent internal state (unused, passed through)
        :return: (clipped action batch as ndarray, agent_states)
        """
        if self.processor is not None:
            states = self.processor(states)
            if torch.is_tensor(states):
                states = states.to(self.device)

        # Pure inference: no_grad avoids building an autograd graph (the
        # original used .data, which still tracked the forward pass).
        with torch.no_grad():
            mu_v = self.net(states)
        mu = mu_v.cpu().numpy()
        logstd = self.net.logstd.detach().cpu().numpy()
        # Gaussian exploration noise.  Sampled with mu's shape so each state
        # in the batch gets INDEPENDENT noise; the original sampled a single
        # logstd-shaped vector that was broadcast (shared) across the batch.
        actions = mu + np.exp(logstd) * np.random.normal(size=mu.shape)
        # Clip to the network's tanh-bounded action range expected by the env.
        actions = np.clip(actions, -1, 1)
        return actions, agent_states
