
import sys
sys.path.append("/home/cole/project_graduate/mmdetection")
from mmdet.models.backbones import ResNeXt
from mmdet.models.layers.transformer import DetrTransformerEncoderLayer,DeformableDetrTransformerEncoderLayer
from mmdet.models.layers import SinePositionalEncoding
import torch
from torch.nn import Conv2d
# ---- Backbone: ResNeXt-50 (32x4d) with GroupNorm + Weight Standardization ----
conv_cfg = dict(type='ConvWS')
norm_cfg = dict(type='GN', num_groups=32, requires_grad=True)

# out_indices=(2, 3): only the C4 and C5 stage outputs are returned.
backbone = ResNeXt(
    depth=50,
    groups=32,
    base_width=4,
    num_stages=4,
    out_indices=(2, 3),
    frozen_stages=1,
    style='pytorch',
    conv_cfg=conv_cfg,
    norm_cfg=norm_cfg,
    init_cfg=dict(
        type='Pretrained',
        checkpoint='open-mmlab://jhu/resnext50_32x4d_gn_ws'),
)

transformer_dim = 256
bs = 8

backbone.cuda()

# Dummy batch of ones: (bs, 3, 512, 512), float32, on GPU.
img = torch.ones([bs, 3, 512, 512]).float().cuda()
feature = backbone(img)


# ---- Sine positional encoding for the 32x32 token grid (512 / stride 16) ----
# In mmdet's SinePositionalEncoding the mask marks PADDED pixels (1 = pad).
# An all-ones mask would declare the entire feature map padding and collapse
# the normalized cumsum into a degenerate embedding; zeros = "no padding",
# which is what a fully-valid 512x512 input needs.
pos_mask = torch.zeros(bs, 32, 32).float().cuda()
# num_feats is the per-axis feature count and must be an int;
# `transformer_dim / 2` is a float in Python 3, so use floor division.
pos_encoding = SinePositionalEncoding(num_feats=transformer_dim // 2, normalize=True)
pos_embed = pos_encoding(pos_mask)
# (bs, C, H, W) -> (bs, H*W, C): flatten to the transformer sequence layout.
pos_embed = pos_embed.view(bs, transformer_dim, -1).permute(0, 2, 1)



padding_mask = torch.ones([bs,32*32]).float().cuda()



# Project the 1024-channel C4 map down to the transformer width with a 1x1
# conv, then flatten (bs, C, H, W) -> (bs, H*W, C) to form the query tokens.
dim_reduce_conv = Conv2d(1024, transformer_dim, 1).cuda()
c4_feat = feature[0]
query = dim_reduce_conv(c4_feat).view(bs, transformer_dim, -1).permute(0, 2, 1)



# ---- Run one plain DETR encoder layer over the flattened C4 tokens ----
encoder = DetrTransformerEncoderLayer().cuda()
# NOTE(review): the deformable layer is constructed but never called — its
# forward additionally needs reference_points / spatial_shapes, which this
# script does not build. Kept so module construction is still exercised.
encoder1 = DeformableDetrTransformerEncoderLayer().cuda()
# forward(query, query_pos, key_padding_mask) — positional args match mmdet 3.x.
detr_out = encoder(query, pos_embed, padding_mask)

print(feature)
# Also print the encoder output: previously detr_out was computed and then
# discarded (only the raw backbone features were printed).
print(detr_out)