# Copyright (C) 2022-present Naver Corporation. All rights reserved.
# Licensed under CC BY-NC-SA 4.0 (non-commercial use only).

# --------------------------------------------------------
# Heads for downstream tasks
# --------------------------------------------------------

"""
A head is a module where the __init__ defines only the head hyperparameters.
A method setup(croconet) takes a CroCoNet and set all layers according to the head and croconet attributes.
The forward takes the features as well as a dictionary img_info containing the keys 'width' and 'height'
"""

import torch.nn as nn
from .dpt_block import DPTOutputAdapter

# Dense-prediction output head for downstream pixelwise tasks
class PixelwiseTaskWithDPT(nn.Module):
    """ DPT module for CroCo.
    by default, hooks_idx will be equal to:
    * for encoder-only: 4 equally spread layers
    * for encoder+decoder: last encoder + 3 equally spread layers of the decoder
    """

    def __init__(self, *, hooks_idx=None, layer_dims=None, output_width_ratio=1, num_channels=1, postprocess=None, **kwargs):
        """
        Args:
            hooks_idx: indices of the backbone blocks whose features feed the DPT;
                None means they are derived automatically in setup() from the
                croconet's depths.
            layer_dims: feature dimensions of the four DPT refinement stages;
                defaults to [96, 192, 384, 768].
            output_width_ratio: horizontal scaling ratio of the output map (1 = unchanged).
            num_channels: number of output channels per pixel.
            postprocess: optional callable applied to the DPT output in forward(); None disables it.
            **kwargs: ignored; accepted for config-compatibility with other heads.
        """
        super(PixelwiseTaskWithDPT, self).__init__()
        self.return_all_blocks = True # backbone needs to return all layers
        self.postprocess = postprocess
        self.output_width_ratio = output_width_ratio
        self.num_channels = num_channels
        self.hooks_idx = hooks_idx
        # None-sentinel instead of a mutable default argument: each instance
        # gets a fresh list, so in-place edits cannot leak across instances.
        self.layer_dims = [96, 192, 384, 768] if layer_dims is None else layer_dims

    def setup(self, croconet):
        """Build the DPT adapter from croconet's depths and embedding dims."""
        dpt_args = {'output_width_ratio': self.output_width_ratio, 'num_channels': self.num_channels}
        if self.hooks_idx is None:
            if hasattr(croconet, 'dec_blocks'): # encoder + decoder
                # spacing between hooked decoder layers; only these decoder
                # depths are supported (raises KeyError otherwise)
                step = {8: 3, 12: 4, 24: 8}[croconet.dec_depth]
                # last encoder block + 3 equally spread decoder blocks, counted
                # in the concatenated enc+dec feature list,
                # e.g. enc_depth=24, dec_depth=12, step=4 -> [23, 27, 31, 35]
                hooks_idx = [croconet.dec_depth+croconet.enc_depth-1-i*step for i in range(3,-1,-1)]
            else: # encoder only: 4 equally spread layers ending at the last block
                step = croconet.enc_depth//4
                hooks_idx = [croconet.enc_depth-1-i*step for i in range(3,-1,-1)]
            self.hooks_idx = hooks_idx
            print(f'PixelwiseTaskWithDPT: automatically setting hook_idxs={self.hooks_idx}')
        dpt_args['hooks'] = self.hooks_idx
        dpt_args['layer_dims'] = self.layer_dims
        self.dpt = DPTOutputAdapter(**dpt_args)
        # hooks below enc_depth tap encoder features, the rest tap decoder features
        dim_tokens = [croconet.enc_embed_dim if hook<croconet.enc_depth else croconet.dec_embed_dim for hook in self.hooks_idx]
        dpt_init_args = {'dim_tokens_enc': dim_tokens}
        self.dpt.init(**dpt_init_args)

    def forward(self, x, img_info):
        """Run the DPT on the list of per-block features.

        Args:
            x: list of backbone block outputs (all blocks; see return_all_blocks).
            img_info: dict with keys 'height' and 'width' of the target image.

        Returns:
            The DPT prediction, optionally transformed by self.postprocess.
        """
        out = self.dpt(x, image_size=(img_info['height'],img_info['width']))
        if self.postprocess: out = self.postprocess(out)
        return out
