# Copyright (c) 2015-present, Facebook, Inc.
# All rights reserved.
import torch
import torch.nn as nn
from functools import partial

from lib.vision_transformer import VisionTransformer, _cfg
from timm.models.registry import register_model
# from timm.models.layers import trunc_normal_
import torch.nn.functional as F
import numpy as np

# __all__ = [
#     'deit_tiny_patch16_224', 'deit_small_patch16_224', 'deit_base_patch16_224',
#     'deit_tiny_distilled_patch16_224', 'deit_small_distilled_patch16_224',
#     'deit_base_distilled_patch16_224', 'deit_base_patch16_384',
#     'deit_base_distilled_patch16_384',
# ]
__all__ = ['deit_small_patch16_224', 'deit_base_patch16_224', 'deit_base_patch16_384',
           ]


class DeiT(VisionTransformer):
    """Vision Transformer variant that feeds the full patch-token sequence
    through the transformer blocks without a class/dist token.

    Args:
        in_chans: number of input image channels (default 1, grayscale).
        img_size: input image size, forwarded to the patch embedding.
        *args, **kwargs: forwarded to ``VisionTransformer``.
    """

    def __init__(self, in_chans = 1, img_size = 256, *args, **kwargs):
        super().__init__(in_chans = in_chans, img_size = img_size, *args, **kwargs)
        num_patches = self.patch_embed.num_patches

        # Re-create the positional embedding, zero-initialized.
        # NOTE(review): the reference ViT initializes this with torch.randn;
        # zeros is presumably acceptable here because the factory functions
        # below replace this parameter anyway -- confirm before training a
        # bare DeiT instance from scratch.
        self.pos_embed = nn.Parameter(
            torch.zeros(1, num_patches + 1, self.embed_dim))

    def forward(self, x):
        # Adapted from the timm vision_transformer implementation, without
        # the class/dist token: embed patches, add positional embeddings,
        # run the transformer blocks, and return the normalized tokens.
        # NOTE(review): as created in __init__, pos_embed holds
        # num_patches + 1 tokens while x holds num_patches, so the addition
        # below only broadcasts correctly after pos_embed is replaced by a
        # factory function (deit_*_patch16_*) -- verify for direct use.
        x = self.patch_embed(x)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for blk in self.blocks:
            x = blk(x)

        x = self.norm(x)
        return x


@register_model
def deit_small_patch16_224(in_chans = 1, img_size = 256, pretrained = False, **kwargs):
    """Build a DeiT-small (patch 16) backbone that returns patch-token
    features instead of classification logits.

    Args:
        in_chans: number of input image channels.
        img_size: input image size.
        pretrained: accepted for timm registry compatibility but currently
            ignored -- checkpoint loading is disabled.
            NOTE(review): confirm this is intentional before relying on
            ``pretrained=True``.
        **kwargs: forwarded to ``DeiT``.

    Returns:
        The configured ``DeiT`` model with an ``nn.Identity`` head.
    """
    model = DeiT(
        patch_size = 16, embed_dim = 384, depth = 8, num_heads = 6, mlp_ratio = 4, qkv_bias = True,
        norm_layer = partial(nn.LayerNorm, eps = 1e-6), in_chans = in_chans, img_size = img_size, **kwargs)
    model.default_cfg = _cfg()

    # Drop the class-token slot so pos_embed matches the patch-token
    # sequence produced by DeiT.forward; detach so the slice becomes a
    # fresh, independent parameter.
    pe = model.pos_embed[:, 1:, :].detach()
    model.pos_embed = nn.Parameter(pe)
    # Expose raw features rather than classification logits.
    model.head = nn.Identity()
    return model


@register_model
def deit_base_patch16_224(pretrained = False, **kwargs):
    """Build a DeiT-base (patch 16) backbone whose positional embedding is
    resampled to a 12x16 patch grid and whose head is replaced by identity.

    Args:
        pretrained: when True, loads weights from the local checkpoint
            (non-strict, so extra/missing keys are tolerated).
        **kwargs: forwarded to ``DeiT``.

    Returns:
        The configured ``DeiT`` model returning patch-token features.
    """
    model = DeiT(
        patch_size = 16, embed_dim = 768, depth = 12, num_heads = 12, mlp_ratio = 4, qkv_bias = True,
        norm_layer = partial(nn.LayerNorm, eps = 1e-6), in_chans = 1, img_size = 256, **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        ckpt = torch.load('pretrained/deit_base_patch16_224-b5f2ef4d.pth')
        model.load_state_dict(ckpt['model'], strict = False)

    # Drop the class-token slot, lay the remaining embeddings out as a
    # square (side x side) channel-first grid, and bilinearly resample
    # that grid to the target 12x16 patch layout.
    pos = model.pos_embed[:, 1:, :].detach().transpose(-1, -2)
    side = int(np.sqrt(pos.shape[-1]))
    pos = pos.view(pos.shape[0], pos.shape[1], side, side)
    pos = F.interpolate(pos, size = (12, 16), mode = 'bilinear', align_corners = True)
    # Back to (batch, tokens, dim) token-sequence form.
    pos = pos.flatten(2).transpose(-1, -2)
    model.pos_embed = nn.Parameter(pos)
    model.head = nn.Identity()
    return model


@register_model
def deit_base_patch16_384(pretrained = False, **kwargs):
    """Build a DeiT-base (patch 16, 384 input) backbone whose positional
    embedding is resampled to a 24x32 patch grid and whose head is replaced
    by identity.

    Args:
        pretrained: when True, loads weights from the local checkpoint
            (strict loading -- any key or shape mismatch raises).
        **kwargs: forwarded to ``DeiT``.

    Returns:
        The configured ``DeiT`` model returning patch-token features.
    """
    model = DeiT(
        img_size = 384, patch_size = 16, embed_dim = 768, depth = 12, num_heads = 12, mlp_ratio = 4, qkv_bias = True,
        norm_layer = partial(nn.LayerNorm, eps = 1e-6), in_chans = 1, **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        ckpt = torch.load('pretrained/deit_base_patch16_384-8de9b5d1.pth')
        model.load_state_dict(ckpt["model"])

    # Drop the class-token slot, lay the remaining embeddings out as a
    # square (side x side) channel-first grid, and bilinearly resample
    # that grid to the target 24x32 patch layout.
    pos = model.pos_embed[:, 1:, :].detach().transpose(-1, -2)
    side = int(np.sqrt(pos.shape[-1]))
    pos = pos.view(pos.shape[0], pos.shape[1], side, side)
    pos = F.interpolate(pos, size = (24, 32), mode = 'bilinear', align_corners = True)
    # Back to (batch, tokens, dim) token-sequence form.
    pos = pos.flatten(2).transpose(-1, -2)
    model.pos_embed = nn.Parameter(pos)
    model.head = nn.Identity()
    return model