import torch
import torch.nn as nn
from classification.focal_transformer import FocalTransformerBlock
if __name__ == '__main__':
    # Smoke test: run a single FocalTransformerBlock on a random feature map
    # and print the output shape. Runs on GPU when available, CPU otherwise
    # (previously hard-coded .cuda(), which crashed on CPU-only machines).
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Input feature map at 1/4 of the original image size.
    feature_size = 56  # H/4
    x = torch.rand(1, 96, feature_size, feature_size, device=device)

    # NOTE(review): no positional embedding is added here — confirm whether
    # the block expects the caller to apply one.

    # Block hyper-parameters.
    dim = 96
    input_resolution = (feature_size, feature_size)  # input feature map resolution (H, W)
    num_heads = 3
    window_size = 7
    use_shift = False  # shifted-window attention (Swin-style); now actually wired to shift_size
    expand_size = 3
    mlp_ratio = 4
    qkv_bias = True
    qk_scale = None
    drop = 0.
    attn_drop = 0.
    drop_path_rate = 0.2
    # Stochastic-depth decay rule for a full multi-stage model, kept for reference:
    # depths = [2, 2, 6, 2]
    # dpr = [r.item() for r in torch.linspace(0, drop_path_rate, sum(depths))]
    norm_layer = nn.LayerNorm

    # Parameters specific to the Focal Transformer.
    pool_method = 'fc'  # pooling method for the coarser focal levels
    # These are per-stage lists in the full model; a single block takes one value.
    focal_levels = [2, 2, 2, 2]
    focal_window = [7, 5, 3, 1]
    use_layerscale = False
    layerscale_value = 1e-4

    # Build one Focal Transformer block using the first stage's settings.
    block = FocalTransformerBlock(
        dim=dim,
        input_resolution=input_resolution,
        num_heads=num_heads,
        window_size=window_size,
        # Was hard-coded to 0, making use_shift a no-op; now honours the flag.
        shift_size=window_size // 2 if use_shift else 0,
        expand_size=expand_size,
        mlp_ratio=mlp_ratio,
        qkv_bias=qkv_bias,
        qk_scale=qk_scale,
        drop=drop,
        attn_drop=attn_drop,
        # In a full model this would be dpr[i]; a lone block uses 0.
        drop_path=0.,
        norm_layer=norm_layer,
        pool_method=pool_method,
        focal_level=focal_levels[0],
        focal_window=focal_window[0],
        use_layerscale=use_layerscale,
        layerscale_value=layerscale_value,
    ).to(device)

    # The block expects token sequences of shape (B, H*W, C), so flatten the
    # spatial dimensions and move channels last. flatten+transpose is safe
    # regardless of contiguity, unlike view().
    B, C, H, W = x.shape
    x = x.flatten(2).transpose(1, 2)  # (B, C, H, W) -> (B, H*W, C)
    x_out = block(x)
    print('x_out.shape: ', x_out.shape)
