# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Backbone modules.
"""
from collections import OrderedDict

import torch
import torch.nn.functional as F
import torchvision
from torch import nn
from torchvision.models._utils import IntermediateLayerGetter
from typing import Dict, List

from util.misc import NestedTensor, is_main_process
try:
    from .position_encoding import build_position_encoding
except:
    from models.position_encoding import build_position_encoding

class FrozenBatchNorm2d(torch.nn.Module):
    """
    BatchNorm2d where the batch statistics and the affine parameters are fixed.

    Copy-paste from torchvision.misc.ops with added eps before rqsrt,
    without which any other models than torchvision.models.resnet[18,34,50,101]
    produce nans.
    """

    def __init__(self, n, eps=1e-5):
        """
        Args:
            n (int): number of feature channels.
            eps (float): small constant added to the running variance before
                the inverse square root, for numerical stability. Defaults to
                1e-5 (the value previously hard-coded in forward()).
        """
        super(FrozenBatchNorm2d, self).__init__()
        self.eps = eps
        # All four tensors are registered as buffers (not Parameters): they are
        # saved/loaded with the state dict but never updated by the optimizer,
        # which is what makes this normalization "frozen".
        self.register_buffer("weight", torch.ones(n))         # per-channel scale
        self.register_buffer("bias", torch.zeros(n))          # per-channel shift
        self.register_buffer("running_mean", torch.zeros(n))  # frozen batch mean
        self.register_buffer("running_var", torch.ones(n))    # frozen batch variance

    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict,
                              missing_keys, unexpected_keys, error_msgs):
        # Checkpoints produced by a regular BatchNorm2d carry a
        # "num_batches_tracked" entry that this frozen variant does not have;
        # drop it so loading does not report an unexpected key.
        num_batches_tracked_key = prefix + 'num_batches_tracked'
        if num_batches_tracked_key in state_dict:
            del state_dict[num_batches_tracked_key]

        super(FrozenBatchNorm2d, self)._load_from_state_dict(
            state_dict, prefix, local_metadata, strict,
            missing_keys, unexpected_keys, error_msgs)

    def forward(self, x):
        # Reshape the per-channel buffers to (1, C, 1, 1) up front so the whole
        # computation below is pure broadcasting — this ordering is
        # fuser-friendly.
        w = self.weight.reshape(1, -1, 1, 1)
        b = self.bias.reshape(1, -1, 1, 1)
        rv = self.running_var.reshape(1, -1, 1, 1)
        rm = self.running_mean.reshape(1, -1, 1, 1)
        # scale = weight / sqrt(var + eps); eps avoids dividing by zero when a
        # channel's running variance is (near) zero.
        scale = w * (rv + self.eps).rsqrt()
        bias = b - rm * scale
        return x * scale + bias


class BackboneBase(nn.Module):
    """Wraps a ResNet-style backbone: freezes the requested parameters and, in
    forward(), returns each selected feature map paired with a padding mask
    resized to that map's spatial size."""

    def __init__(self, backbone: nn.Module, train_backbone: bool, num_channels: int, return_interm_layers: bool):
        super().__init__()
        trainable_layers = ('layer2', 'layer3', 'layer4')
        for name, parameter in backbone.named_parameters():
            # Only layer2-4 may be trained; everything else — and everything
            # at all when train_backbone is False — stays frozen.
            frozen = not train_backbone or not any(layer in name for layer in trainable_layers)
            if frozen:
                parameter.requires_grad_(False)
        if return_interm_layers:
            return_layers = {"layer1": "0", "layer2": "1", "layer3": "2", "layer4": "3"}
        else:
            return_layers = {'layer4': "0"}
        self.body = IntermediateLayerGetter(backbone, return_layers=return_layers)
        self.num_channels = num_channels

    def forward(self, tensor_list: NestedTensor):
        features = self.body(tensor_list.tensors)
        out: Dict[str, NestedTensor] = {}
        for name, feature in features.items():
            mask = tensor_list.mask
            assert mask is not None
            # Downsample the padding mask to this feature map's (H, W):
            # add a leading dim for interpolate, convert to float (interpolate
            # rejects bool input), resize, then restore bool and drop the dim.
            resized = F.interpolate(mask[None].float(), size=feature.shape[-2:]).to(torch.bool)[0]
            out[name] = NestedTensor(feature, resized)
        return out


class Backbone(BackboneBase):
    """ResNet backbone with frozen BatchNorm."""

    def __init__(self, name: str,
                 train_backbone: bool,
                 return_interm_layers: bool,
                 dilation: bool):
        # Look up the torchvision constructor by name (e.g. "resnet50");
        # weights are downloaded only on the main process.
        resnet_factory = getattr(torchvision.models, name)
        backbone = resnet_factory(
            replace_stride_with_dilation=[False, False, dilation],
            pretrained=is_main_process(),
            norm_layer=FrozenBatchNorm2d,
        )
        # The shallow ResNets end with 512 channels, the bottleneck ones with 2048.
        channels = 512 if name in ('resnet18', 'resnet34') else 2048
        super().__init__(backbone, train_backbone, channels, return_interm_layers)


class Joiner(nn.Sequential):
    """Pairs a backbone with a position-embedding module.

    self[0] is the backbone, self[1] the position encoding; forward() returns
    the backbone's feature maps together with one positional encoding per map.
    """

    def __init__(self, backbone, position_embedding):
        super().__init__(backbone, position_embedding)

    def forward(self, tensor_list: NestedTensor):
        # Backbone output: dict of NestedTensor (features + resized masks).
        features = self[0](tensor_list)
        out: List[NestedTensor] = []
        pos = []
        for feature in features.values():
            out.append(feature)
            # Positional encoding, cast to the feature dtype.
            pos.append(self[1](feature).to(feature.tensors.dtype))
        return out, pos


def build_backbone(args):
    """Builds the full backbone module (feature extractor + position encoding).

    Returns a Joiner whose num_channels mirrors the underlying backbone's.
    """
    position_embedding = build_position_encoding(args)
    backbone = Backbone(
        args.backbone,
        args.lr_backbone > 0,  # train the backbone only with a positive LR
        args.masks,            # segmentation needs the intermediate layers
        args.dilation,
    )
    model = Joiner(backbone, position_embedding)
    model.num_channels = backbone.num_channels
    return model

if __name__ == '__main__':
    import argparse
    from main import get_args_parser
    parser = argparse.ArgumentParser('DETR training and evaluation script', parents=[get_args_parser()])
    args = parser.parse_args()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    backbone = build_backbone(args).to(device)

    # Build a dummy padded batch. The padding mask must have the same spatial
    # size (H, W) as the images; the original code used 607 for the image
    # height but 608 for the mask height — fixed to agree here.
    mask = torch.rand(2, 608, 767) > 0.5  # random boolean padding mask (bool from `>`)
    tensors = torch.rand((2, 3, 608, 767), dtype=torch.float32)  # random images in [0, 1)
    samples = NestedTensor(tensors, mask).to(device)

    features, pos = backbone(samples)
    print(features)