import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import torch

class HeadNet(nn.Module):
    """A single Q-value head: two fully connected layers that map the
    shared convolutional features to per-action Q-values.

    Args:
        reshape_size: dimensionality of the flattened feature vector
            produced by the shared feature extractor.
        n_actions: number of discrete actions (output dimension).
    """

    def __init__(self, reshape_size, n_actions=4):
        super().__init__()
        self.fc1 = nn.Linear(reshape_size, 512)
        self.fc2 = nn.Linear(512, n_actions)

    def forward(self, x):
        # Hidden layer with ReLU, then a linear read-out of Q-values.
        return self.fc2(F.relu(self.fc1(x)))


class NetWithPrior(nn.Module):
    """Wrap a multi-head Q-network with a fixed random prior network
    (Bootstrapped DQN with randomized prior functions).

    Each head's output becomes ``net(x, k) + prior_scale * prior(x, k)``,
    where the prior term is detached so gradients never flow into the
    prior network.

    Args:
        net: trainable ensemble network; must expose ``net_list``,
            ``_core`` and ``_heads``.
        prior: frozen prior network with the same head structure as ``net``.
        prior_scale: weight of the prior output; if <= 0 the prior is
            ignored and never stored on the module.
    """

    def __init__(self, net, prior, prior_scale=1.):
        super().__init__()
        self.net = net
        self.prior_scale = prior_scale
        if self.prior_scale > 0.:
            self.prior = prior

    def forward(self, x, k):
        """Forward pass.

        Args:
            x: observation batch.
            k: head index to use; if ``None``, return a list with the
                output of every head.
        """
        if not hasattr(self.net, "net_list"):
            raise ValueError("Only works with a net_list model")
        use_prior = self.prior_scale > 0.
        if k is not None:
            # Single selected head.
            head_out = self.net(x, k)
            if not use_prior:
                return head_out
            return head_out + self.prior_scale * self.prior(x, k).detach()
        # k is None: run the shared core once, then every head.
        head_outs = self.net._heads(self.net._core(x))
        if not use_prior:
            return head_outs
        prior_outs = self.prior._heads(self.prior._core(x))
        # Detached prior outputs act as a fixed random function added to Q.
        return [q + self.prior_scale * p.detach()
                for q, p in zip(head_outs, prior_outs)]


class EnsembleNet(nn.Module):
    """Shared convolutional core followed by an ensemble of Q-value heads
    (bootstrapped DQN style).

    Args:
        n_ensemble: number of independent heads in the ensemble.
        n_actions: number of discrete actions each head scores.
        h: input image height.
        w: input image width.
        num_channels: number of input image channels (e.g. stacked frames).
    """

    def __init__(self, n_ensemble, n_actions, h, w, num_channels):
        super().__init__()
        self.core_net = CoreNet(h=h, w=w, num_channels=num_channels)
        # Every head consumes the core's flattened feature vector.
        self.net_list = nn.ModuleList(
            [HeadNet(reshape_size=self.core_net.reshape_size,
                     n_actions=n_actions)
             for _ in range(n_ensemble)]
        )

    def _core(self, x):
        # Run the shared convolutional feature extractor.
        return self.core_net(x)

    def _heads(self, x):
        # Apply every head to the shared features.
        return [head(x) for head in self.net_list]

    def forward(self, x, k=None):
        """Forward pass over the observation batch ``x``.

        Args:
            x: input tensor of shape (batch_size, num_channels, h, w).
            k: head index; if ``None``, return a list with the output of
                every head.
        """
        features = self._core(x)
        if k is not None:
            return self.net_list[k](features)
        return self._heads(features)


class CoreNet(nn.Module):
    """Convolutional feature extractor shared by all ensemble heads.

    Uses the standard DQN (Nature 2015) convolutional stack and flattens
    the result to a 2-D (batch, reshape_size) tensor.

    Args:
        h: input image height.
        w: input image width.
        num_channels: number of input channels (e.g. stacked frames).
    """

    def __init__(self, h, w, num_channels=4):
        super(CoreNet, self).__init__()
        self.num_channels = num_channels
        self.conv1 = nn.Conv2d(self.num_channels, 32, 8, 4)
        self.conv2 = nn.Conv2d(32, 64, 4, 2)
        self.conv3 = nn.Conv2d(64, 64, 3, 1)
        # Flattened size of the conv stack's output for one
        # (num_channels, h, w) input; heads size their first layer from it.
        self.reshape_size = self._get_conv_out((self.num_channels, h, w))

    def _get_conv_out(self, shape):
        """Return the number of elements the conv stack produces for a
        single input of the given (channels, h, w) shape."""
        # no_grad: this probe pass is only for sizing, so don't waste time
        # and memory building an autograd graph during __init__.
        with torch.no_grad():
            o = self.conv3(self.conv2(self.conv1(torch.zeros(1, *shape))))
        return o.numel()

    def forward(self, x):
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = F.relu(self.conv3(x))
        # Flatten everything but the batch dimension.
        return x.view(-1, self.reshape_size)
