# Copyright (c) [2012]-[2021] Shanghai Yitu Technology Co., Ltd.
#
# This source code is licensed under the Clear BSD License
# LICENSE file in the root directory of this file
# All rights reserved.
"""
T2T-ViT
"""
import torch
import torch.nn as nn
from ..builder import BACKBONE_NEW
# from timm.models.helpers import load_pretrained
# from timm.models.registry import register_model
# from timm.models.layers import trunc_normal_
import numpy as np
from mmcv.runner import load_checkpoint, BaseModule
from mmdet.utils import get_root_logger
from ..utils.token_transformer import Token_transformer
from ..utils.token_performer import Token_performer
from ..utils.transformer_block import Block, get_sinusoid_encoding
from ..utils.transformer import PatchEmbed

def _cfg(url='', **kwargs):
    # import pdb
    # pdb.set_trace()
    return {
        'url': url,
        'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': None,
        'crop_pct': .9, 'interpolation': 'bicubic',
        'mean': (0.485, 0.456, 0.406), 'std': (0.229, 0.224, 0.225),
        'classifier': 'head',
        **kwargs
    }

# One default (no pretrained URL) config per supported T2T-ViT variant; the
# keys match the 'default_cfg' entries used by token_type_settings.
default_cfgs = {
    name: _cfg()
    for name in (
        'T2t_vit_7',
        'T2t_vit_10',
        'T2t_vit_12',
        'T2t_vit_14',
        'T2t_vit_19',
        'T2t_vit_24',
        'T2t_vit_t_14',
        'T2t_vit_t_19',
        'T2t_vit_t_24',
        'T2t_vit_14_resnext',
        'T2t_vit_14_wide',
    )
}

# Architecture hyper-parameters per T2T-ViT variant, keyed by lower-case name.
# Each qk_scale is the variant's fixed attention scale; it is usually
# embed_dim ** -0.5, but 't2t_vit_14_wide' keeps 512 ** -0.5 even though its
# embed_dim is 768 (reproduced here exactly as in the original table).
_VARIANT_SPECS = [
    # (key, tokens_type, embed_dim, depth, num_heads, mlp_ratio, scale_base)
    ('t2t_vit_7', 'performer', 256, 7, 4, 2., 256),
    ('t2t_vit_10', 'performer', 256, 10, 4, 2., 256),
    ('t2t_vit_12', 'performer', 256, 12, 4, 2., 256),
    ('t2t_vit_14', 'performer', 384, 14, 6, 2., 384),
    ('t2t_vit_19', 'performer', 448, 19, 7, 3., 448),
    ('t2t_vit_24', 'performer', 512, 24, 8, 3., 512),
    ('t2t_vit_t_14', 'transformer', 384, 14, 6, 3., 384),
    ('t2t_vit_t_19', 'transformer', 448, 19, 7, 3., 448),
    ('t2t_vit_t_24', 'transformer', 512, 24, 8, 3., 512),
    ('t2t_vit_14_resnext', 'performer', 384, 14, 8, 3., 384),
    ('t2t_vit_14_wide', 'performer', 768, 14, 12, 3., 512),
]
token_type_settings = {
    key: {
        'default_cfg': key.capitalize(),  # 't2t_vit_x' -> 'T2t_vit_x'
        'tokens_type': tokens_type,
        'embed_dim': embed_dim,
        'depth': depth,
        'num_heads': num_heads,
        'mlp_ratio': mlp_ratio,
        'qk_scale': scale_base ** -0.5,
    }
    for key, tokens_type, embed_dim, depth, num_heads, mlp_ratio, scale_base in _VARIANT_SPECS
}

class T2T_module(nn.Module):
    """
    Tokens-to-Token encoding module for a stereo image pair.

    Three soft splits (strides 4, 2, 2 -> overall stride 16) are interleaved
    with two attention-based re-structurization steps.  The left and right
    views share all weights: their token sequences are concatenated along the
    token dimension before each attention/projection and split back afterwards.

    Args:
        img_size (tuple[int, int]): input size (H, W) of each view.
        tokens_type (str): 'transformer' | 'performer' | 'convolution'.
        in_chans (int): input channels per view.
        embed_dim (int): channel dim of the final projected tokens.
        token_dim (int): internal channel dim of the T2T attention steps.
    """
    def __init__(self, img_size=(224, 224), tokens_type='performer', in_chans=3, embed_dim=768, token_dim=64):
        super().__init__()
        if tokens_type == 'transformer':
            print('adopt transformer encoder for tokens-to-token')
            self.soft_split0 = nn.Unfold(kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
            self.soft_split1 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
            self.soft_split2 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))

            self.attention1 = Token_transformer(dim=in_chans * 7 * 7, in_dim=token_dim, num_heads=1, mlp_ratio=1.0)
            self.attention2 = Token_transformer(dim=token_dim * 3 * 3, in_dim=token_dim, num_heads=1, mlp_ratio=1.0)
            self.project = nn.Linear(token_dim * 3 * 3, embed_dim)

        elif tokens_type == 'performer':
            print('adopt performer encoder for tokens-to-token')
            self.soft_split0 = nn.Unfold(kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))
            self.soft_split1 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))
            self.soft_split2 = nn.Unfold(kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))

            self.attention1 = Token_performer(dim=in_chans * 7 * 7, in_dim=token_dim, kernel_ratio=0.5)
            self.attention2 = Token_performer(dim=token_dim * 3 * 3, in_dim=token_dim, kernel_ratio=0.5)
            self.project = nn.Linear(token_dim * 3 * 3, embed_dim)

        elif tokens_type == 'convolution':  # just for comparison with convolution, not our model
            # NOTE(review): this branch defines no `soft_split2`, so the output-size
            # computation below (and `forward`) fails for 'convolution'.  It is kept
            # only for comparison and needs its own forward implementation.
            print('adopt convolution layers for tokens-to-token')
            self.soft_split0 = nn.Conv2d(3, token_dim, kernel_size=(7, 7), stride=(4, 4), padding=(2, 2))  # the 1st convolution
            self.soft_split1 = nn.Conv2d(token_dim, token_dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))  # the 2nd convolution
            self.project = nn.Conv2d(token_dim, embed_dim, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1))  # the 3rd convolution

        # There are 3 soft splits with strides 4, 2, 2 -> overall downsampling 16.
        self.num_patches = (img_size[0] // (4 * 2 * 2)) * (img_size[1] // (4 * 2 * 2))
        self.img_size = img_size
        # Standard unfold/conv output-length formula.  nn.Unfold keeps `dilation`
        # as the scalar default (1) here, so the plain multiplication is valid.
        self.out_h0 = (self.img_size[0] + 2 * self.soft_split0.padding[0] -
                 self.soft_split0.dilation * (self.soft_split0.kernel_size[0] - 1) - 1) // self.soft_split0.stride[0] + 1
        self.out_w0 = (self.img_size[1] + 2 * self.soft_split0.padding[1] -
                 self.soft_split0.dilation * (self.soft_split0.kernel_size[1] - 1) - 1) // self.soft_split0.stride[1] + 1
        self.out_h1 = (self.out_h0 + 2 * self.soft_split1.padding[0] -
                 self.soft_split1.dilation * (self.soft_split1.kernel_size[0] - 1) - 1) // self.soft_split1.stride[0] + 1
        self.out_w1 = (self.out_w0 + 2 * self.soft_split1.padding[1] -
                 self.soft_split1.dilation * (self.soft_split1.kernel_size[1] - 1) - 1) // self.soft_split1.stride[1] + 1
        self.out_h2 = (self.out_h1 + 2 * self.soft_split2.padding[0] -
                 self.soft_split2.dilation * (self.soft_split2.kernel_size[0] - 1) - 1) // self.soft_split2.stride[0] + 1
        self.out_w2 = (self.out_w1 + 2 * self.soft_split2.padding[1] -
                 self.soft_split2.dilation * (self.soft_split2.kernel_size[1] - 1) - 1) // self.soft_split2.stride[1] + 1
        # Spatial size after each stage, and per-view token counts (l0, l1, l2)
        # used to split the concatenated left+right sequence back apart.
        self.output_sizes = [list(img_size), [self.out_h0, self.out_w0], [self.out_h1, self.out_w1], [self.out_h2, self.out_w2]]
        self.l0 = int(self.out_h0) * int(self.out_w0)
        self.l1 = int(self.out_h1) * int(self.out_w1)
        self.l2 = int(self.out_h2) * int(self.out_w2)

    def forward(self, imgs):
        """Tokenize a (left, right) image pair.

        Args:
            imgs: pair of tensors, each (B, in_chans, H, W) with (H, W) == img_size.

        Returns:
            tuple: (left, right) feature maps, each (B, embed_dim, out_h2, out_w2).
        """
        left, right = imgs

        # step0: soft split each view -> (B, num_tokens, patch_dim)
        x_left = self.soft_split0(left).transpose(1, 2)
        x_right = self.soft_split0(right).transpose(1, 2)

        # iteration1: joint re-structurization over the concatenated views,
        # then split back and fold each view into a 2-D map for the next split.
        x = self.attention1(torch.cat([x_left, x_right], dim=1))
        B, _, C = x.shape
        x_left = x[:, :self.l0, :].transpose(1, 2).reshape(B, C, int(self.out_h0), int(self.out_w0))
        x_right = x[:, self.l0:, :].transpose(1, 2).reshape(B, C, int(self.out_h0), int(self.out_w0))

        # iteration1: soft split
        x_left = self.soft_split1(x_left).transpose(1, 2)
        x_right = self.soft_split1(x_right).transpose(1, 2)

        # iteration2: re-structurization
        x = self.attention2(torch.cat([x_left, x_right], dim=1))
        B, _, C = x.shape
        x_left = x[:, :self.l1, :].transpose(1, 2).reshape(B, C, int(self.out_h1), int(self.out_w1))
        x_right = x[:, self.l1:, :].transpose(1, 2).reshape(B, C, int(self.out_h1), int(self.out_w1))

        # iteration2: soft split
        x_left = self.soft_split2(x_left).transpose(1, 2)
        x_right = self.soft_split2(x_right).transpose(1, 2)

        # final tokens: linear projection to embed_dim over the joint sequence,
        # then split and fold each view back into a 2-D feature map.
        x = self.project(torch.cat([x_left, x_right], dim=1))
        B, _, C = x.shape
        x_left = x[:, :self.l2, :].transpose(1, 2).reshape(B, C, int(self.out_h2), int(self.out_w2))
        x_right = x[:, self.l2:, :].transpose(1, 2).reshape(B, C, int(self.out_h2), int(self.out_w2))
        return x_left, x_right

@BACKBONE_NEW.register_module()
class T2T_ViTOri(BaseModule):
    """T2T-ViT backbone operating on a stereo (left, right) image pair.

    A shared ``T2T_module`` tokenizes both views; each view's token sequence
    is then processed independently through the same stack of transformer
    ``Block``s (shared weights), with a fixed sinusoidal positional embedding
    added per view.  Outputs are the two per-view feature maps.

    Args:
        trans_type (str): key into ``token_type_settings`` selecting the
            variant (embed_dim, depth, num_heads, mlp_ratio, ...).
        img_size (tuple[int, int]): input size (H, W) of each view.
        in_chans (int): input channels per view.
        num_classes (int): stored only; no classification head is built here.
        qkv_bias (bool): bias flag passed to the attention blocks.
        qk_scale (float | None): attention scale; overridden by the variant's
            table value whenever ``pretrained`` is truthy.
        drop_rate (float): dropout after positional embedding and in blocks.
        attn_drop_rate (float): attention dropout inside blocks.
        drop_path_rate (float): max stochastic-depth rate (linearly scheduled).
        norm_layer: normalization layer class for blocks and final norm.
        token_dim (int): internal channel dim of the T2T attention steps.
        pretrained (bool | str): only its truthiness is used here, to force
            the published qk_scale; checkpoint loading is not done in this class.
    """
    def __init__(self, trans_type= 't2t_vit_12', img_size=(224, 224), in_chans=3, num_classes=1000, qkv_bias=False, qk_scale=None, drop_rate=0., attn_drop_rate=0.,
                 drop_path_rate=0., norm_layer=nn.LayerNorm, token_dim=64, pretrained=False):
        super().__init__()
        self.num_classes = num_classes
        # Resolve the variant's architecture hyper-parameters from the table.
        token_settings = token_type_settings[trans_type]
        embed_dim = token_settings['embed_dim']
        tokens_type = token_settings['tokens_type']
        depth = token_settings['depth']
        num_heads = token_settings['num_heads']
        mlp_ratio = token_settings['mlp_ratio']
        if pretrained:
            # Force the variant's published attention scale so loaded weights
            # see the same scaling they were trained with.
            qk_scale = token_settings['qk_scale']
        self.default_config = default_cfgs[token_settings['default_cfg']]
        self.num_features = self.embed_dim = embed_dim  # num_features for consistency with other models

        self.tokens_to_token = T2T_module(
                img_size=img_size, tokens_type=tokens_type, in_chans=in_chans, embed_dim=embed_dim, token_dim=token_dim)
        num_patches = self.tokens_to_token.num_patches

        # NOTE(review): cls_token is registered but never used in forward()
        # below — presumably kept for checkpoint compatibility; confirm.
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        # Fixed (non-trainable) sinusoidal positional table for one view; it is
        # added to the left and right sequences separately in forward().
        self.pos_embed = nn.Parameter(data=get_sinusoid_encoding(n_position=num_patches, d_hid=embed_dim), requires_grad=False)

        self.pos_drop = nn.Dropout(p=drop_rate)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]  # stochastic depth decay rule
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim, num_heads=num_heads, mlp_ratio=mlp_ratio, qkv_bias=qkv_bias, qk_scale=qk_scale,
                drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[i], norm_layer=norm_layer)
            for i in range(depth)])
        self.norm = norm_layer(embed_dim)

    def _init_weights(self, m):
        # Per-module initializer (truncated normal for Linear, unit LayerNorm).
        # NOTE(review): never applied in this class (no ``self.apply`` call
        # visible here) — initialization presumably comes from BaseModule or
        # checkpoint loading; confirm before relying on it.
        if isinstance(m, nn.Linear):
            nn.init.trunc_normal_(m.weight, std=.02)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    @torch.jit.ignore
    def no_weight_decay(self):
        # Parameter names excluded from weight decay by the optimizer builder.
        return {'cls_token'}

    def get_classifier(self):
        # NOTE(review): ``self.head`` is only created by reset_classifier();
        # calling this before that raises AttributeError.
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        # Replace (or create) the classification head for ``num_classes``
        # outputs; Identity when num_classes <= 0.
        self.num_classes = num_classes
        self.head = nn.Linear(self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x):
        """Run the backbone on a (left, right) pair of image tensors.

        Returns:
            tuple: (left, right) feature maps, each (B, embed_dim, out_h, out_w).
        """
        B = x[0].shape[0]
        left_feat, right_feat = self.tokens_to_token(x)
        # Flatten each view's 2-D map to a token sequence (B, out_h*out_w, C).
        B, C, out_h, out_w = left_feat.shape
        left_feat = left_feat.reshape(B, C, -1).transpose(1, 2)
        right_feat = right_feat.reshape(B, C, -1).transpose(1, 2)

        # Same positional table and dropout applied to both views.
        left_feat = left_feat + self.pos_embed
        right_feat = right_feat + self.pos_embed
        left_feat = self.pos_drop(left_feat)
        right_feat = self.pos_drop(right_feat)

        # Shared transformer blocks, each view processed as its own sequence.
        for blk in self.blocks:
            left_feat = blk(left_feat)
            right_feat = blk(right_feat)

        left_feat = self.norm(left_feat)
        right_feat = self.norm(right_feat)
        # Fold the token sequences back into 2-D feature maps.
        left_feat = left_feat.transpose(2,1).reshape(B, -1, out_h, out_w)
        right_feat = right_feat.transpose(2,1).reshape(B, -1, out_h, out_w)
        return left_feat, right_feat


def t2t_vit_7(pretrained=False, **kwargs):
    """T2T-ViT-7 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 256 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=7, num_heads=4, mlp_ratio=2., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_7']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_10(pretrained=False, **kwargs):
    """T2T-ViT-10 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 256 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=10, num_heads=4, mlp_ratio=2., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_10']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_12(pretrained=False, **kwargs):
    """T2T-ViT-12 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 256 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=256, depth=12, num_heads=4, mlp_ratio=2., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_12']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model



def t2t_vit_14(pretrained=False, **kwargs):
    """T2T-ViT-14 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 384 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=384, depth=14, num_heads=6, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_14']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_19(pretrained=False, **kwargs):
    """T2T-ViT-19 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 448 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=448, depth=19, num_heads=7, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_19']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_24(pretrained=False, **kwargs):
    """T2T-ViT-24 (performer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 512 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=512, depth=24, num_heads=8, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_24']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_t_14(pretrained=False, **kwargs):
    """T2T-ViT-t-14 (transformer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 384 ** -0.5)
    model = T2T_ViT(tokens_type='transformer', embed_dim=384, depth=14, num_heads=6, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_t_14']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_t_19(pretrained=False, **kwargs):
    """T2T-ViT-t-19 (transformer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 448 ** -0.5)
    model = T2T_ViT(tokens_type='transformer', embed_dim=448, depth=19, num_heads=7, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_t_19']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_t_24(pretrained=False, **kwargs):
    """T2T-ViT-t-24 (transformer tokens-to-token); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 512 ** -0.5)
    model = T2T_ViT(tokens_type='transformer', embed_dim=512, depth=24, num_heads=8, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_t_24']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model

# resnext and wide structure variants

def t2t_vit_14_resnext(pretrained=False, **kwargs):
    """ResNeXt-style T2T-ViT-14 (many narrow heads); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 384 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=384, depth=14, num_heads=32, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_14_resnext']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model


def t2t_vit_14_wide(pretrained=False, **kwargs):
    """Wide T2T-ViT (embed_dim 768, depth 4); loads a checkpoint when
    *pretrained* is a path/URL string."""
    # NOTE(review): `T2T_ViT` is neither defined nor imported in this module —
    # confirm where the constructor is meant to come from.
    if pretrained:
        kwargs.setdefault('qk_scale', 512 ** -0.5)
    model = T2T_ViT(tokens_type='performer', embed_dim=768, depth=4, num_heads=12, mlp_ratio=3., **kwargs)
    model.default_cfg = default_cfgs['T2t_vit_14_wide']
    if isinstance(pretrained, str):
        load_checkpoint(model, pretrained, strict=False, logger=get_root_logger())
    return model
