import torch
import torch.nn as nn
from typing import Optional, Union, List
# from backbone.our_resnet import Our_ResNet
from backbone.vit_win_rvsa_v3_wsz7 import vit_b_rvsa, vit_l_rvsa
from backbone.intern_image import InternImage
from backbone.vit import ViT_B, ViT_L
from backbone.vitaev2 import vitae_v2_s
from preprocessing import MTP_DataPreprocessor
from semantic_segmentation.encoder_decoder import MTP_SS_UperNet
from instance_segmentation.mask_rcnn import MTP_IS_MaskRCNN
from rotated_detection.oriented_rcnn import MTP_RD_OrientedRCNN
from mmdet.models.utils import empty_instances
# from mmdet.models.backbones import ResNet
from backbone.our_resnet import res50
from backbone.swin_transformer import swin_t,swin_b,swin_l
# from backbone.our_resnet import Our_ResNet
from backbone.lora import LoRA_ViT_timm
from mmengine.config import ConfigDict
import torch.distributed as dist
from pprint import pprint
from einops import rearrange
from torch.nn import init
from torch.nn.modules.utils import _pair
from torch import Tensor
from torchvision.ops.deform_conv import deform_conv2d as deform_conv2d_tv
import math
import torch.nn.functional as F
from sklearn.metrics import f1_score, accuracy_score, recall_score, precision_score
# from utils import AveragePrecisionMeter

class AverageMeter(object):
    """Running-average tracker for a scalar metric.

    Adapted from the PyTorch ImageNet example:
    https://github.com/pytorch/examples/blob/master/imagenet/main.py
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out every accumulated statistic."""
        self.val = self.avg = self.sum = self.count = 0

    def update(self, val, n=1):
        """Record ``val`` observed ``n`` times and refresh the running mean."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count


def initialize_head(module):
    """Xavier-initialise every Linear / Conv2d layer inside ``module``.

    Weights get ``xavier_uniform_``; biases (when present) are zeroed.
    Other layer types are left untouched.
    """
    target_types = (nn.Linear, nn.Conv2d)
    for layer in module.modules():
        if not isinstance(layer, target_types):
            continue
        nn.init.xavier_uniform_(layer.weight)
        if layer.bias is not None:
            nn.init.constant_(layer.bias, 0)

def accuracy_fn(y_true, y_pred):
    """Percentage of predictions that match the ground-truth labels.

    Args:
        y_true (torch.Tensor): ground-truth labels.
        y_pred (torch.Tensor): predicted labels, same shape as ``y_true``.

    Returns:
        float: accuracy in percent, e.g. ``78.45``.
    """
    n_correct = (y_true == y_pred).sum().item()
    return (n_correct / len(y_pred)) * 100

def get_backbone(args):
    """Instantiate the backbone encoder selected by ``args.backbone``.

    Args:
        args: parsed CLI arguments. ``args.backbone`` selects the
            architecture; ``args.init_backbone`` selects the pretrained
            weights ('rsp' remote-sensing, 'imp' ImageNet, 'mae' MAE
            pretraining, or 'none' for random init).

    Returns:
        The instantiated encoder module, optionally initialised from a
        checkpoint under ``./pretrained/``.

    Raises:
        NotImplementedError: for an unknown backbone, or an unsupported
            backbone / init-weights combination.
    """
    if args.backbone == 'swin_t':
        encoder = swin_t()
        print('################# Using Swin-T as backbone! ###################')
        if args.init_backbone == 'rsp':
            encoder.init_weights('./pretrained/rsp-swin-t-ckpt.pth')
            print('################# Initing Swin-T pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure Swin-T Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'swin_b':
        encoder = swin_b()
        # Fixed log text: this branch builds Swin-B, not Swin-T.
        print('################# Using Swin-B as backbone! ###################')
        if args.init_backbone == 'imp':
            encoder.init_weights('./pretrained/swin_base_patch4_window7_224_22k_20220317-4f79f7c0.pth')
            print('################# Initing Swin-B pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure Swin-B Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'swin_l':
        encoder = swin_l()
        print('################# Using Swin-L as backbone! ###################')
        if args.init_backbone == 'imp':
            encoder.init_weights('./pretrained/swin_large_patch4_window7_224_22k_20220412-aeecf2aa.pth')
            print('################# Initing Swin-L pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure Swin-L Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'vit_b_rvsa':
        encoder = vit_b_rvsa(args)
        print('################# Using ViT-B + RVSA as backbone! ###################')
        if args.init_backbone == 'mae':
            encoder.init_weights('./pretrained/vit-b-checkpoint-1599.pth')
            print('################# Initing ViT-B + RVSA pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure ViT-B + RVSA SEP Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'vit_l_rvsa':
        encoder = vit_l_rvsa(args)
        print('################# Using ViT-L + RVSA as backbone! ###################')
        if args.init_backbone == 'mae':
            encoder.init_weights('./pretrained/vit-l-mae-checkpoint-1599.pth')
            print('################# Initing ViT-L + RVSA pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure ViT-L + RVSA SEP Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'vit_l':
        encoder = ViT_L(args)
        print('################# Using ViT-L as backbone! ###################')
        if args.init_backbone == 'mae':
            encoder.init_weights('./pretrained/vit-l-mae-checkpoint-1599.pth')
            print('################# Initing ViT-L pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure ViT-L SEP Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'vit_b':
        encoder = ViT_B(args)
        print('################# Using ViT-B as backbone! ###################')
        if args.init_backbone == 'mae':
            encoder.init_weights('./pretrained/vit-b-checkpoint-1599.pth')
            print('################# Initing ViT-B  pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure ViT-B SEP Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'internimage_xl':
        encoder = InternImage(core_op='DCNv3',
                        channels=192,
                        depths=[5, 5, 24, 5],
                        groups=[12, 24, 48, 96],
                        mlp_ratio=4.,
                        drop_path_rate=0.2,
                        norm_layer='LN',
                        layer_scale=1e-5,
                        offset_scale=2.0,
                        post_norm=True,
                        with_cp=True,
                        out_indices=(0, 1, 2, 3)
                        )
        print('################# Using InternImage-XL as backbone! ###################')
        if args.init_backbone == 'imp':
            encoder.init_weights('./pretrained/internimage_xl_22kto1k_384.pth')
            print('################# Initing InterImage-T pretrained weights for Pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure InterImage-T SEP Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'vitaev2_s':
        print('################# Using ViTAEv2-S as backbone! ###################')
        encoder = vitae_v2_s(args)
        if args.init_backbone == 'rsp':
            encoder.init_weights("./pretrained/rsp-vitaev2-s-ckpt.pth")
            print('################# Using RSP as pretraining! ###################')
        elif args.init_backbone == 'none':
            print('################# Pure ViTAEV2-S Pretraining! ###################')
        else:
            raise NotImplementedError

    elif args.backbone == 'resnet50':
        print('################# Using ResNet-50 as backbone! ###################')
        encoder = res50()
        if args.init_backbone == 'rsp':
            encoder.init_weights("./pretrained/rsp-resnet-50-ckpt.pth")
            print('################# Using RSP as pretraining! ###################')
        elif args.init_backbone == 'imp':
            encoder.init_weights("./pretrained/resnet50-0676ba61.pth")
        elif args.init_backbone == 'none':
            print('################# Pure  Pretraining! ###################')
        else:
            raise NotImplementedError

    else:
        # Previously an unknown backbone fell through to ``return encoder``
        # and raised a confusing UnboundLocalError; fail fast instead.
        raise NotImplementedError(f'Unknown backbone: {args.backbone}')

    return encoder

def get_semsegdecoder(args, in_channels):
    """Create a UPerNet semantic-segmentation decoder (``MTP_SS_UperNet``).

    Args:
        args: parsed CLI arguments (unused here; kept for interface parity).
        in_channels: per-stage channel counts of the backbone feature maps.

    Returns:
        MTP_SS_UperNet: decoder with a single-class UPerHead (the final
        per-class projection lives in the framework's ``semseghead``).
    """
    head_cfg = dict(
        type='UPerHead',
        num_classes=1,
        in_channels=in_channels,
        ignore_index=255,
        in_index=[0, 1, 2, 3],
        pool_scales=(1, 2, 3, 6),
        channels=256,
        dropout_ratio=0.1,
        norm_cfg=dict(type='SyncBN', requires_grad=True),
        align_corners=False,
        loss_decode=dict(
            type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
    )
    return MTP_SS_UperNet(decode_head=head_cfg)


class MutliTaskFramework_demt(torch.nn.Module):
    """Multi-task framework with cross-task feature interaction ("demt" variant).

    Builds a shared backbone (``get_backbone``) plus task heads selected by
    ``args.tasks``:

    * ``'ss'``  -- semantic segmentation: UperNet decoder + 1x1 conv head.
    * ``'rd'``  -- rotated detection: Oriented R-CNN (FPN neck + RoI heads).
    * ``'cls'`` -- multi-label scene classification: pool + linear head.

    Between the encoder and the heads, multi-scale features are fused and
    routed through per-task deformable mixers (``DefMixer``), a shared
    multi-head attention over the concatenated task token sequences, and
    per-task query attention, giving each task its own refined feature map.
    NOTE(review): the "_demt" suffix suggests this follows the DEMT
    deformable-mixer / task-query design -- confirm against the ``DefMixer``
    implementation (defined elsewhere in the project).

    ``forward`` returns a dict of losses when ``self.training`` is True and
    a dict of per-task outputs/metrics otherwise.
    """
    def __init__(self, 
                  args, 
                  logger,
                  classes: int = 1,
                  batch_augments = None):
        """Create the encoder, interaction modules and task heads.

        Args:
            args: parsed CLI arguments; reads ``args.tasks``,
                ``args.background``, ``args.backbone`` and ``args.finetune``.
            logger: logger used for rank-0 info messages (requires an
                initialised torch.distributed process group).
            classes: number of segmentation classes incl. background.
            batch_augments: optional batch augmentations forwarded to the
                data preprocessor.
        """
        super(MutliTaskFramework_demt, self).__init__()

        # mmdet-style preprocessor: normalisation, BGR->RGB, padding of
        # images / masks / seg maps to a size divisible by 32.
        self.data_preprocessor = MTP_DataPreprocessor(
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                bgr_to_rgb=True,
                pad_size_divisor=32,
                pad_mask=True,
                mask_pad_value=0,
                pad_seg=True,
                seg_pad_value=255,
                boxtype2tensor=True,
                batch_augments=batch_augments
        )
        self.data_preprocessor.to('cuda')

        self.args = args
        self.classes = classes

        if args.background == 'True':
            # Segmentation keeps the background class; rotated detection does not.
            self.ss_classes = self.classes
            self.rd_classes = self.classes - 1
        else:
            raise NotImplementedError
        # NOTE(review): assumes torch.distributed is already initialised.
        if dist.get_rank() == 0:
            logger.info("######## Use feature interaction!!! #######")
        self.encoder = get_backbone(args)

        # Init task head
        if 'ss' in args.tasks:
            print('################# Using UperNet for semseg! ######################')
            # NOTE(review): in_channels falls back to None when the encoder has
            # no ``out_channels`` attribute -- verify the decoder accepts that.
            self.semsegdecoder = get_semsegdecoder(args, in_channels=getattr(self.encoder, 'out_channels', None))
            self.semseghead = nn.Sequential(
                    nn.Dropout2d(0.1),
                    nn.Conv2d(256, self.ss_classes, kernel_size=1)
                )
        if 'cls' in args.tasks:
            print('################# Using Plain Classification Head! ######################')
            # 64 matches dim_ (the channel width of the interaction features
            # fed to this head in forward); 13 is the number of scene classes.
            self.sceneclshead = nn.Sequential(
                    # nn.Conv2d(512, 512, kernel_size=3, padding=1),  # example conv layer
                    nn.AdaptiveAvgPool2d((1, 1)),  # adaptive pooling: shrink the feature map to 1x1
                    nn.Flatten(),
                    nn.Linear(64, 13)  # fully-connected layer producing the class scores
                )
        if 'rd' in args.tasks:
            print('################# Using Oriented-RCNN for rotdet! ######################')
            self.rotdetdecoder = MTP_RD_OrientedRCNN(
            neck = ConfigDict(
                type='mmdet.FPN',
                in_channels=self.encoder.out_channels,
                out_channels=256,
                num_outs=5)
            )
            
            # Task-specific final cls/reg layers so the shared RoI box head
            # can serve a task-dependent number of detection classes.
            self.rotdetroiboxhead_fc_cls = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.cls_last_dim, self.rd_classes + 1)
            self.rotdetroiboxhead_fc_reg = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.reg_last_dim, 
                                                    self.rotdetdecoder.roi_head.bbox_head.bbox_coder.encode_size)
        
            
        if args.finetune == 'lora' and args.backbone == 'vit_b_rvsa':
            num_params = sum(p.numel() for p in self.encoder.parameters() if p.requires_grad)
            print(f"Before lora trainable parameters: {num_params}") #trainable parameters: 86859496

            # Wrap the ViT encoder with LoRA adapters; only the low-rank
            # matrices remain trainable.
            self.encoder = LoRA_ViT_timm(vit_model=self.encoder, r=4, alpha=4, num_classes=10)

            num_params = sum(p.numel() for p in self.encoder.parameters() if p.requires_grad)
            print(f"After loratrainable parameters: {num_params}") #trainable parameters: 86859496
                    #self.initialize()


        self.tasks = args.tasks 
        # NOTE(review): assumes the encoder emits 4 feature maps with
        # 64/128/256/512 channels (ResNet-50-style layout) -- confirm for
        # other backbones.
        self.in_channels = sum([64, 128, 256, 512])
        dim_ = 64


        # Project the concatenated multi-scale features down to dim_ channels.
        self.linear1 = nn.Sequential(nn.Linear(self.in_channels, dim_), nn.LayerNorm(dim_))
        # Shared attention over the concatenation of all task token sequences.
        self.task_fusion = nn.MultiheadAttention(embed_dim=dim_, num_heads=4, dropout=0.)
        # self.linear1 = nn.Sequential(nn.Linear(self.in_channels, dim_), nn.LayerNorm(dim_))
        # One deformable mixer per task (DefMixer is defined elsewhere).
        self.defor_mixers = nn.ModuleList([DefMixer(dim_in=dim_, dim=dim_, depth=1)  for t in range (len(self.tasks))])
        self.smlp = nn.Sequential(nn.Linear(dim_, dim_), nn.LayerNorm(dim_))

        self.smlp2 = nn.ModuleList([nn.Sequential(nn.Linear(dim_, dim_), nn.LayerNorm(dim_))  for t in range (len(self.tasks))])

        # Per-task cross-attention: task tokens query the fused features.
        self.task_querys = nn.ModuleList([nn.MultiheadAttention(embed_dim=dim_, num_heads=4, dropout=0.)  for t in range (len(self.tasks))])

        # Task-specific 3x3 conv projections (ConvBNReLU defined elsewhere).
        self.rdconv = ConvBNReLU(dim_,
                                    64,
                                    kernel_size=3,
                                    norm_layer=nn.BatchNorm2d,
                                    activation_layer=nn.ReLU)
        self.ssconv = ConvBNReLU(dim_,
                                    64,
                                    kernel_size=3,
                                    norm_layer=nn.BatchNorm2d,
                                    activation_layer=nn.ReLU)
        self.clsconv = ConvBNReLU(dim_,
                                    64,
                                    kernel_size=3,
                                    norm_layer=nn.BatchNorm2d,
                                    activation_layer=nn.ReLU)


    def train_rotdet_roi_head_box_head_forward(self, r, sampling_results, num_classes, fc_cls=None, fc_reg=None):                     
        """Run the RoI box head on sampled proposals and compute its losses.

        The externally supplied final layers ``fc_cls``/``fc_reg`` are
        applied on top of the shared box-head features so the number of
        classes can differ from the decoder's default.
        """
        bbox_feats, rois = self.rotdetdecoder.roi_head.train_bbox_forward(r, sampling_results)
        # box head of roi head
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        cls_score, bbox_pred = fc_cls(x_cls), fc_reg(x_reg)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        bbox_results = self.rotdetdecoder.roi_head.bbox_loss(num_classes, sampling_results, rois, bbox_results)
        return bbox_results
    
    def test_rotdet_roi_head_box_head_forward(self, r, batch_img_metas, proposals, rois, rcnn_test_cfg, num_classes, bbox_rescale, fc_cls=None, fc_reg=None):
        """Run the RoI box head at inference and decode per-image results."""
        bbox_feats = self.rotdetdecoder.roi_head.test_bbox_roi_feats(r, rois)
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        cls_score, bbox_pred = fc_cls(x_cls), fc_reg(x_reg)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        result_list = self.rotdetdecoder.roi_head.predict_bbox(
        batch_img_metas, bbox_results, proposals, rois, rcnn_test_cfg, num_classes, rescale = bbox_rescale
        )
        return result_list

    def forward(self, data):
        """Preprocess, encode, run the cross-task interaction, then the heads.

        Args:
            data: raw batch dict with ``'inputs'`` and ``'data_samples'``
                (mmdet/mmseg data-sample convention).

        Returns:
            dict: task losses when training; per-task predictions and
            classification metrics when evaluating.
        """

        if self.training:
            data = self.data_preprocessor(data, True)
        else:
            data = self.data_preprocessor(data, False)

        x, data_sample = data['inputs'], data['data_samples']
        # print(x1.shape, x2.shape,x3.shape)
        b = x.shape[0]
        # NOTE(review): assumes the encoder returns a list of multi-scale
        # feature maps (index 0 = highest resolution).
        e = self.encoder(x)

        inputs = [e[i] for i in range(0,len(e))]

        # Upsample every scale to the resolution of the largest map, then
        # concatenate along channels.
        upsampled_inputs = [
            nn.functional.interpolate(
                input=x,
                size=inputs[0].shape[2:],
                mode='bilinear',
                align_corners=False) for x in inputs
        ]
        inputs = torch.cat(upsampled_inputs, dim=1)
        # print(inputs.shape) # torch.Size([2, 960, 256, 256])
        b, c, h, w = inputs.shape
        # Project the concatenated channels down to dim_ (64).
        inp = self.linear1(inputs.permute(0, 2, 3, 1)).permute(0, 3, 1, 2)

        # One deformable mixer per task -> one token sequence per task.
        outs=[]
        for ind, defor_mixer in enumerate(self.defor_mixers):
            out = defor_mixer(inp)
            out = rearrange(out, "b c h w -> b (h w) c").contiguous()
            outs.append(out)
        # for i in range(len(outs)): 
        #     print(outs[i].shape) # torch.Size([2, 65536, 256]) torch.Size([2, 65536, 256])
        # Self-attention over the concatenation of all task sequences.
        task_cat = torch.cat(outs, dim=1) # torch.Size([2, 131072, 256])
        task_cat = self.task_fusion(task_cat, task_cat, task_cat)[0] # torch.Size([2, 131072, 256])
        # for item in self.task_fusion(task_cat, task_cat, task_cat):
        #     print(item.shape) # torch.Size([2, 131072, 256]) torch.Size([131072, 2, 2])
        task_cat = self.smlp(task_cat) # torch.Size([2, 131072, 256])

        # Compress the fused sequence back to 65536 (= 256*256) tokens so it
        # matches the per-task sequences below.
        # NOTE(review): hard-codes a 256x256 feature resolution -- confirm
        # against the actual input size.
        task_cat = task_cat.permute(0, 2, 1).unsqueeze(3)
        task_cat_compressed_4d = F.interpolate(task_cat, size=(65536, 1), mode='nearest')
        task_cat = task_cat_compressed_4d.squeeze(3).permute(0, 2, 1)
        # print(task_cat.shape)

        # Each task queries the fused features; residual + MLP refinement,
        # then reshape the tokens back into a feature map.
        outs_ls = []
        for ind, task_query in enumerate(self.task_querys):
            inp = outs[ind] + self.smlp2[ind](task_query(outs[ind], task_cat, task_cat)[0])
            outs_ls.append(rearrange(inp, "b (h w) c -> b c h w", h=h, w=w).contiguous())
        # print()
        # Map each refined feature map to its task name, in args.tasks order.
        inp_dict = {t: outs_ls[idx] for idx, t in enumerate(self.tasks)}
        # task_specific_feats = {t: self.bottleneck[t](inp_dict[t]) for t in self.tasks}
        # print(len(inp_dict['ss']), len(inp_dict['rd']))
        # print(inp_dict['ss'][0].shape, inp_dict['ss'][1].shape)


        if 'ss' in self.args.tasks:
            # stack([...]).squeeze(0) is effectively a no-op copy of the tensor.
            ss_feat = torch.stack([inp_dict['ss']]).squeeze(0)
            ss_feat = self.ssconv(ss_feat)
            ss_feat = F.interpolate(ss_feat, size=(256, 256), mode='bilinear', align_corners=False)
            # Replace the finest encoder map with the task-refined features.
            e_ss = e.copy()
            e_ss[0] = ss_feat
            ss = self.semsegdecoder.decode_head._forward_feature(e_ss)
            seg_logits = self.semseghead(ss)

        if 'rd' in self.args.tasks:
            rd_feat = torch.stack([inp_dict['rd']]).squeeze(0)
            rd_feat = self.rdconv(rd_feat)
            rd_feat = F.interpolate(rd_feat, size=(256, 256), mode='bilinear', align_corners=False)
            e_rd = e.copy()
            e_rd[0] = rd_feat
            rd = self.rotdetdecoder.neck(e_rd)

        
        if 'cls' in self.args.tasks:
            cls_feat = torch.stack([inp_dict['cls']]).squeeze(0)
            # NOTE(review): the loop variable shadows the outer ``data`` dict.
            gt_scene_cls = [
                data.scene_classes.get("scene_classes") for data in data_sample
            ]
            gt_scene_cls = torch.stack(gt_scene_cls, dim=0).squeeze(1).to(torch.float32).cuda()
            # print(gt_scene_cls.shape, gt_scene_cls.dtype)
            # cls_fn = nn.CrossEntropyLoss()
            # Multi-label classification -> BCE with logits.
            cls_fn = nn.BCEWithLogitsLoss() 
        

        ######################### trainning
        if self.training:
            losses = {}
            ######################### train sem seg
            
            if 'ss' in self.args.tasks:
                loss_ss = self.semsegdecoder.decode_head.loss_by_feat(seg_logits, data_sample)
                losses['loss_ss'] = loss_ss

             ######################### train rot det
            if 'rd' in self.args.tasks:
                # rpn head
                loss_rd, tbr = self.rotdetdecoder.train_before_roihead(rd, data_sample)
                # select propsal
                sampling_results = self.rotdetdecoder.roi_head.gen_sampling_results(tbr)
                # roi_head_box_head  
                bbox_results = self.train_rotdet_roi_head_box_head_forward(rd, sampling_results, self.rd_classes,
                                                                                        fc_cls = self.rotdetroiboxhead_fc_cls,
                                                                                        fc_reg = self.rotdetroiboxhead_fc_reg)
                
                loss_rd.update(bbox_results['loss_bbox'])
                losses['loss_rd'] = loss_rd
                
            if 'cls' in self.args.tasks:
                # print(cls_feat.shape)
                cls_pred = self.sceneclshead(cls_feat)
                losses['loss_cls'] = cls_fn(cls_pred, gt_scene_cls)

            return losses

        ######################### validation
        else:
            outputs = {}
            if 'ss' in self.args.tasks:
                if data_sample is not None:
                    batch_img_metas = [
                        data_sample.metainfo for data_sample in data_sample
                    ]
                    #print(batch_img_metas)
                else:
                    # No samples supplied: synthesise minimal metadata from x.
                    batch_img_metas = [
                        dict(
                            ori_shape=x.shape[2:],
                            img_shape=x.shape[2:],
                            pad_shape=x.shape[2:],
                            padding_size=[0, 0, 0, 0])
                    ] * x.shape[0]

                seg_logits = self.semsegdecoder.decode_head.predict_by_feat(seg_logits, batch_img_metas)
                output_ss = self.semsegdecoder.postprocess_result(seg_logits, data_sample)

                outputs['output_ss'] = output_ss

            ######################### test rot det
                
            if 'rd' in self.args.tasks:
                rpn_results_list, rescale = self.rotdetdecoder.test_before_roihead(rd, data_sample)
                assert self.rotdetdecoder.roi_head.with_bbox, 'Bbox head must be implemented.'

                batch_img_metas = [
                    data_samples.metainfo for data_samples in data_sample
                ]

                proposals, rois = self.rotdetdecoder.roi_head.test_bbox_generate_roi(rpn_results_list)
                rcnn_test_cfg = self.rotdetdecoder.roi_head.test_cfg
                bbox_rescale = rescale if not self.rotdetdecoder.roi_head.with_mask else False

                if rois.shape[0] == 0:
                    # No proposals survived: return empty instance lists.
                    results_list = empty_instances(
                        batch_img_metas,
                        rois.device,
                        task_type='bbox',
                        box_type=self.rotdetdecoder.roi_head.bbox_head.predict_box_type,
                        num_classes=self.rd_classes,
                        score_per_cls=rcnn_test_cfg is None)
                else:
                    results_list = self.test_rotdet_roi_head_box_head_forward(rd, batch_img_metas, proposals, rois, 
                                                                            rcnn_test_cfg, self.rd_classes, bbox_rescale, 
                                                                            fc_cls = self.rotdetroiboxhead_fc_cls, 
                                                                            fc_reg = self.rotdetroiboxhead_fc_reg)
                    
                output_rd = self.rotdetdecoder.add_pred_to_datasample(data_sample, results_list)
                outputs['output_rd'] = output_rd

            if 'cls' in self.args.tasks:
                cls_logits = self.sceneclshead(cls_feat)
                # Per-label decision at 0.5 on the sigmoid probabilities.
                preds = torch.sigmoid(cls_logits).data > 0.5 
                # preds = torch.sigmoid(cls_logits).data
                # Micro-averaged batch metrics via sklearn.
                f1_miro = f1_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)
                accuracy_miro = accuracy_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy())
                recall_miro = recall_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)
                precision_miro = precision_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)

                outputs['output_cls'] = f1_miro
                outputs['cls_acc'] = accuracy_miro
                outputs['cls_recall'] = recall_miro
                # NOTE(review): key is spelled 'cls_precison' (sic); downstream
                # consumers may depend on this spelling.
                outputs['cls_precison'] = precision_miro
                outputs['cls_gt'] = gt_scene_cls
                outputs['batch_size'] = gt_scene_cls.size(0)


            return outputs


class MutliTaskFramework(torch.nn.Module):
    def __init__(self, 
                  args, 
                  logger,
                  classes: int = 1,
                  batch_augments = None):
        """Build the shared encoder and the task heads selected by ``args.tasks``.

        Args:
            args: parsed CLI arguments; reads ``args.tasks``,
                ``args.background``, ``args.backbone`` and ``args.finetune``.
            logger: accepted for interface parity but unused in this class.
            classes: number of segmentation classes incl. background.
            batch_augments: optional batch augmentations for the preprocessor.
        """
        super(MutliTaskFramework, self).__init__()

        # mmdet-style preprocessor: normalisation, BGR->RGB and padding of
        # images / masks / seg maps to a size divisible by 32.
        self.data_preprocessor = MTP_DataPreprocessor(
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                bgr_to_rgb=True,
                pad_size_divisor=32,
                pad_mask=True,
                mask_pad_value=0,
                pad_seg=True,
                seg_pad_value=255,
                boxtype2tensor=True,
                batch_augments=batch_augments
        )
        self.data_preprocessor.to('cuda')
        self.args = args
        self.classes = classes
        # Number of scene-classification classes (multi-label head below).
        self.cls_classes = 13

        if args.background == 'True':
            # Segmentation keeps the background class; rotated detection does not.
            self.ss_classes = self.classes
            self.rd_classes = self.classes - 1
        
        else:
            raise NotImplementedError

        self.encoder = get_backbone(args)

        # Init task head
        if 'ss' in args.tasks:
            print('################# Using UperNet for semseg! ######################')
            # NOTE(review): in_channels falls back to None when the encoder has
            # no ``out_channels`` attribute -- verify the decoder accepts that.
            self.semsegdecoder = get_semsegdecoder(args, in_channels=getattr(self.encoder, 'out_channels', None))
            self.semseghead = nn.Sequential(
                    nn.Dropout2d(0.1),
                    nn.Conv2d(256, self.ss_classes, kernel_size=1)
                )
        if 'cls' in args.tasks:
            print('################# Using Plain Classification Head! ######################')
            self.sceneclshead = nn.Sequential(
                    # nn.Conv2d(512, 512, kernel_size=3, padding=1),  # example conv layer
                    nn.AdaptiveAvgPool2d((1, 1)),  # adaptive pooling: shrink the feature map to 1x1
                    nn.Flatten(),
                    nn.Linear(self.encoder.out_channels[-1], self.cls_classes)  # fully-connected layer producing the class scores
                )
        if 'rd' in args.tasks:
            print('################# Using Oriented-RCNN for rotdet! ######################')

            self.rotdetdecoder = MTP_RD_OrientedRCNN(
            neck = ConfigDict(
                type='mmdet.FPN',
                in_channels=self.encoder.out_channels,
                out_channels=256,
                num_outs=5)
            )
            
            # Task-specific final cls/reg layers on top of the shared RoI box
            # head, sized for this task's number of detection classes.
            self.rotdetroiboxhead_fc_cls = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.cls_last_dim, self.rd_classes + 1)
            self.rotdetroiboxhead_fc_reg = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.reg_last_dim, 
                                                    self.rotdetdecoder.roi_head.bbox_head.bbox_coder.encode_size)
        
            
        if args.finetune == 'lora' and args.backbone == 'vit_b_rvsa':
            num_params = sum(p.numel() for p in self.encoder.parameters() if p.requires_grad)
            print(f"Before lora trainable parameters: {num_params}") #trainable parameters: 86859496

            # Wrap the ViT encoder with LoRA adapters; only the low-rank
            # matrices remain trainable.
            self.encoder = LoRA_ViT_timm(vit_model=self.encoder, r=4, alpha=4, num_classes=10)

            num_params = sum(p.numel() for p in self.encoder.parameters() if p.requires_grad)
            print(f"After loratrainable parameters: {num_params}") #trainable parameters: 86859496
                    #self.initialize()

    def train_rotdet_roi_head_box_head_forward(self, r, sampling_results, num_classes, fc_cls=None, fc_reg=None):
        bbox_feats, rois = self.rotdetdecoder.roi_head.train_bbox_forward(r, sampling_results)
        # box head of roi head
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        cls_score, bbox_pred = fc_cls(x_cls), fc_reg(x_reg)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        bbox_results = self.rotdetdecoder.roi_head.bbox_loss(num_classes, sampling_results, rois, bbox_results)
        return bbox_results
    
    def test_rotdet_roi_head_box_head_forward(self, r, batch_img_metas, proposals, rois, rcnn_test_cfg, num_classes, bbox_rescale, fc_cls=None, fc_reg=None):
        bbox_feats = self.rotdetdecoder.roi_head.test_bbox_roi_feats(r, rois)
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        cls_score, bbox_pred = fc_cls(x_cls), fc_reg(x_reg)
        bbox_results = dict(
            cls_score=cls_score, bbox_pred=bbox_pred, bbox_feats=bbox_feats)
        result_list = self.rotdetdecoder.roi_head.predict_bbox(
        batch_img_metas, bbox_results, proposals, rois, rcnn_test_cfg, num_classes, rescale = bbox_rescale
        )
        return result_list

    def forward(self, data):

        if self.training:
            data = self.data_preprocessor(data, True)
        else:
            data = self.data_preprocessor(data, False)

        x, data_sample = data['inputs'], data['data_samples']

        b = x.shape[0]
        e = self.encoder(x)


        if 'ss' in self.args.tasks:
            ss = self.semsegdecoder.decode_head._forward_feature(e)
            seg_logits = self.semseghead(ss)

        ######################### rot det
            
        if 'rd' in self.args.tasks:
            rd = self.rotdetdecoder.neck(e)

        if 'cls' in self.args.tasks:

            gt_scene_cls = [
                data.scene_classes.get("scene_classes") for data in data_sample
            ]
            gt_scene_cls = torch.stack(gt_scene_cls, dim=0).squeeze(1).to(torch.float32).cuda()
            # print(gt_scene_cls.shape, gt_scene_cls.dtype)
            # cls_fn = nn.CrossEntropyLoss()
            cls_fn = nn.BCEWithLogitsLoss() 


        ######################### trainning
        if self.training:
            losses = {}

            ######################### train sem seg
            if 'ss' in self.args.tasks:
                loss_ss = self.semsegdecoder.decode_head.loss_by_feat(seg_logits, data_sample)
                losses['loss_ss'] = loss_ss

             ######################### train rot det
            if 'rd' in self.args.tasks:
                # rpn head
                loss_rd, tbr = self.rotdetdecoder.train_before_roihead(rd, data_sample)
                # select propsal
                sampling_results = self.rotdetdecoder.roi_head.gen_sampling_results(tbr)
                # roi_head_box_head
                bbox_results = self.train_rotdet_roi_head_box_head_forward(rd, sampling_results, self.rd_classes,
                                                                                        fc_cls = self.rotdetroiboxhead_fc_cls,
                                                                                        fc_reg = self.rotdetroiboxhead_fc_reg)
                loss_rd.update(bbox_results['loss_bbox'])
                losses['loss_rd'] = loss_rd

            if 'cls' in self.args.tasks:
                cls_pred = self.sceneclshead(e[3])
                losses['loss_cls'] = cls_fn(cls_pred, gt_scene_cls)
                
            return losses

        ######################### validation
        else:
            outputs = {}
            
            if 'ss' in self.args.tasks:
                if data_sample is not None:
                    batch_img_metas = [
                        data_sample.metainfo for data_sample in data_sample
                    ]
                    #print(batch_img_metas)
                else:
                    batch_img_metas = [
                        dict(
                            ori_shape=x.shape[2:],
                            img_shape=x.shape[2:],
                            pad_shape=x.shape[2:],
                            padding_size=[0, 0, 0, 0])
                    ] * x.shape[0]

                seg_logits = self.semsegdecoder.decode_head.predict_by_feat(seg_logits, batch_img_metas)
                output_ss = self.semsegdecoder.postprocess_result(seg_logits, data_sample)

                outputs['output_ss'] = output_ss

            ######################### test rot det
                
            if 'rd' in self.args.tasks:
                rpn_results_list, rescale = self.rotdetdecoder.test_before_roihead(rd, data_sample)
                assert self.rotdetdecoder.roi_head.with_bbox, 'Bbox head must be implemented.'

                batch_img_metas = [
                    data_samples.metainfo for data_samples in data_sample
                ]

                proposals, rois = self.rotdetdecoder.roi_head.test_bbox_generate_roi(rpn_results_list)
                rcnn_test_cfg = self.rotdetdecoder.roi_head.test_cfg
                bbox_rescale = rescale if not self.rotdetdecoder.roi_head.with_mask else False

                if rois.shape[0] == 0:
                    results_list = empty_instances(
                        batch_img_metas,
                        rois.device,
                        task_type='bbox',
                        box_type=self.rotdetdecoder.roi_head.bbox_head.predict_box_type,
                        num_classes=self.rd_classes,
                        score_per_cls=rcnn_test_cfg is None)
                else:
                    results_list = self.test_rotdet_roi_head_box_head_forward(rd, batch_img_metas, proposals, rois, 
                                                                            rcnn_test_cfg, self.rd_classes, bbox_rescale, 
                                                                            fc_cls = self.rotdetroiboxhead_fc_cls, 
                                                                            fc_reg = self.rotdetroiboxhead_fc_reg)
                    
                output_rd = self.rotdetdecoder.add_pred_to_datasample(data_sample, results_list)
                outputs['output_rd'] = output_rd


            if 'cls' in self.args.tasks:
                cls_logits = self.sceneclshead(e[3])
                preds = torch.sigmoid(cls_logits).data > 0.5 
                # preds = torch.sigmoid(cls_logits).data
                f1_miro = f1_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)
                accuracy_miro = accuracy_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy())
                recall_miro = recall_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)
                precision_miro = precision_score(gt_scene_cls.to("cpu").to(torch.int).numpy() ,preds.to("cpu").to(torch.int).numpy() , average="micro",zero_division=1.0)

                outputs['output_cls'] = f1_miro
                outputs['cls_acc'] = accuracy_miro
                outputs['cls_recall'] = recall_miro
                outputs['cls_precison'] = precision_miro
                outputs['cls_gt'] = gt_scene_cls
                outputs['batch_size'] = gt_scene_cls.size(0)

            return outputs


class TestFrameWork(torch.nn.Module):
    """Test/evaluation framework: ResNet-50 encoder + Oriented-RCNN rotated
    detection, with optional 'ss' (semantic segmentation) and 'cls' (scene
    classification) branches selected via ``args.tasks``.

    Weights come from an RSP Faster-RCNN (oriented RPN) checkpoint whose keys
    are remapped so that the FPN neck / RPN land under ``rotdetdecoder`` and
    the ROI box head's cls/reg FC layers land on this module's own
    ``rotdetroiboxhead_fc_cls`` / ``rotdetroiboxhead_fc_reg``.

    NOTE(review): the 'ss' and 'cls' branches reference ``self.semsegdecoder``,
    ``self.semseghead`` and ``self.sceneclshead``, which are NOT created in
    this ``__init__`` — those tasks only work if the attributes are attached
    externally. Confirm against the caller before enabling them.
    """

    def __init__(self,
                 args,
                 logger,
                 classes: int = 1,
                 batch_augments=None):
        """
        Args:
            args: namespace with at least ``tasks`` (iterable of task tags)
                and ``background`` (the *string* 'True' from the CLI).
            logger: kept for interface compatibility (unused here).
            classes: total number of classes including background.
            batch_augments: kept for interface compatibility (unused here).
        """
        super(TestFrameWork, self).__init__()

        # mmdet-style preprocessing: normalisation, BGR->RGB, pad to /32.
        self.data_preprocessor = MTP_DataPreprocessor(
                mean=[123.675, 116.28, 103.53],
                std=[58.395, 57.12, 57.375],
                bgr_to_rgb=True,
                pad_size_divisor=32,
                pad_mask=True,
                mask_pad_value=0,
                pad_seg=True,
                seg_pad_value=255,
                boxtype2tensor=True,
                # batch_augments=batch_augments
        )
        self.data_preprocessor.to('cuda')
        self.args = args
        self.classes = classes

        # `background` arrives as the string 'True'/'False' from argparse.
        if args.background == 'True':
            self.ss_classes = self.classes
            # Detection heads exclude the background class.
            self.rd_classes = self.classes - 1
        else:
            raise NotImplementedError

        self.encoder = res50()

        print('################# Using Oriented-RCNN for rotdet! ######################')

        self.rotdetdecoder = MTP_RD_OrientedRCNN(
        neck = ConfigDict(
            type='mmdet.FPN',
            in_channels=self.encoder.out_channels,
            out_channels=256,
            num_outs=5)
        )
        pretrained = '/data1/users/zhengzhiyu/mtp_workplace/documents/RSP/Object Detection/work_dirs/faster/faster_rcnn_orpn_r50_fpn_1x_dota10/latest.pth'
        # Task-specific ROI box-head output layers; classification gets an
        # extra background logit (+1).
        self.rotdetroiboxhead_fc_cls = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.cls_last_dim, self.rd_classes + 1)
        self.rotdetroiboxhead_fc_reg = nn.Linear(self.rotdetdecoder.roi_head.bbox_head.reg_last_dim,
                                                self.rotdetdecoder.roi_head.bbox_head.bbox_coder.encode_size)

        state_dict = self._load_and_remap_checkpoint(pretrained)

        # strict=False: the checkpoint does not cover every parameter here.
        msg = self.load_state_dict(state_dict, False)
        # Requires torch.distributed to be initialised before construction.
        if dist.get_rank() == 0:
            print(msg)

    def _load_and_remap_checkpoint(self, pretrained):
        """Load the checkpoint at ``pretrained`` and remap its keys onto this
        module's attribute layout. Returns the remapped state dict."""
        checkpoint = torch.load(pretrained, map_location='cpu')
        if 'state_dict' in checkpoint:
            state_dict = checkpoint['state_dict']
        elif 'model' in checkpoint:
            state_dict = checkpoint['model']
        else:
            state_dict = checkpoint

        # backbone.* -> encoder.*
        if list(state_dict.keys())[0].startswith('backbone.'):
            state_dict = {k.replace('backbone', 'encoder'): v for k, v in state_dict.items()}

        # neck/RPN move under rotdetdecoder; the box head's final FCs map onto
        # this module's own linear layers; shared FCs stay inside the decoder.
        changes = {}
        for k in state_dict:
            if k.startswith('rpn_head') or k.startswith('neck'):
                changes[k] = 'rotdetdecoder.' + k
            elif k.startswith('roi_head.bbox_head.fc_cls'):
                changes[k] = k.replace('roi_head.bbox_head.fc_cls', 'rotdetroiboxhead_fc_cls')
            elif k.startswith('roi_head.bbox_head.fc_reg'):
                changes[k] = k.replace('roi_head.bbox_head.fc_reg', 'rotdetroiboxhead_fc_reg')
            elif k.startswith('roi_head.bbox_head.shared_fcs'):
                changes[k] = k.replace('roi_head.bbox_head.shared_fcs', 'rotdetdecoder.roi_head.bbox_head.shared_fcs')

        for old_key, new_key in changes.items():
            state_dict[new_key] = state_dict.pop(old_key)
        return state_dict

    def train_rotdet_roi_head_box_head_forward(self, r, sampling_results, num_classes, fc_cls=None, fc_reg=None):
        """Run the ROI box head on sampled training proposals.

        Args:
            r: FPN feature maps from the detection neck.
            sampling_results: proposal sampling results from the RPN stage.
            num_classes: number of foreground detection classes.
            fc_cls / fc_reg: external classification / regression layers.

        Returns:
            dict of box-head results including the 'loss_bbox' entry.
        """
        bbox_feats, rois = self.rotdetdecoder.roi_head.train_bbox_forward(r, sampling_results)
        # Shared box-head trunk, then our task-specific output layers.
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        bbox_results = dict(
            cls_score=fc_cls(x_cls),
            bbox_pred=fc_reg(x_reg),
            bbox_feats=bbox_feats)
        return self.rotdetdecoder.roi_head.bbox_loss(num_classes, sampling_results, rois, bbox_results)

    def test_rotdet_roi_head_box_head_forward(self, r, batch_img_metas, proposals, rois, rcnn_test_cfg, num_classes, bbox_rescale, fc_cls=None, fc_reg=None):
        """Run the ROI box head at test time and decode per-image predictions.

        Returns:
            list of per-image InstanceData-style results.
        """
        bbox_feats = self.rotdetdecoder.roi_head.test_bbox_roi_feats(r, rois)
        x_cls, x_reg = self.rotdetdecoder.roi_head.bbox_head(bbox_feats)
        bbox_results = dict(
            cls_score=fc_cls(x_cls),
            bbox_pred=fc_reg(x_reg),
            bbox_feats=bbox_feats)
        return self.rotdetdecoder.roi_head.predict_bbox(
            batch_img_metas, bbox_results, proposals, rois, rcnn_test_cfg, num_classes, rescale=bbox_rescale
        )

    def forward(self, data):
        """Preprocess ``data``, run the encoder, then the selected task heads.

        Returns:
            A 3-slot list indexed [ss, rd, cls]; unselected tasks stay 0.
            Training mode returns per-task losses, eval mode per-task outputs.
        """
        data = self.data_preprocessor(data, self.training)
        x, data_sample = data['inputs'], data['data_samples']
        e = self.encoder(x)

        if 'ss' in self.args.tasks:
            ss = self.semsegdecoder.decode_head._forward_feature(e)
            seg_logits = self.semseghead(ss)

        if 'rd' in self.args.tasks:
            rd = self.rotdetdecoder.neck(e)

        if 'cls' in self.args.tasks:
            gt_scene_cls = [
                sample.scene_classes.get("scene_classes") for sample in data_sample
            ]
            # NOTE(review): targets keep their stored dtype; CrossEntropyLoss
            # expects long class indices (or float probabilities) — confirm
            # against the dataset pipeline.
            gt_scene_cls = torch.stack(gt_scene_cls, dim=0).squeeze(1).cuda()
            cls_fn = nn.CrossEntropyLoss()

        ######################### training
        if self.training:
            losses = [0, 0, 0]

            if 'ss' in self.args.tasks:
                losses[0] = self.semsegdecoder.decode_head.loss_by_feat(seg_logits, data_sample)

            if 'rd' in self.args.tasks:
                # RPN losses plus inputs for the ROI stage.
                loss_rd, tbr = self.rotdetdecoder.train_before_roihead(rd, data_sample)
                # Sample positive/negative proposals for the box head.
                sampling_results = self.rotdetdecoder.roi_head.gen_sampling_results(tbr)
                bbox_results = self.train_rotdet_roi_head_box_head_forward(
                    rd, sampling_results, self.rd_classes,
                    fc_cls=self.rotdetroiboxhead_fc_cls,
                    fc_reg=self.rotdetroiboxhead_fc_reg)
                loss_rd.update(bbox_results['loss_bbox'])
                losses[1] = loss_rd

            if 'cls' in self.args.tasks:
                # e[3]: deepest encoder feature map feeds the scene head.
                cls_pred = self.sceneclshead(e[3])
                losses[2] = cls_fn(cls_pred, gt_scene_cls)

            return losses

        ######################### validation
        outputs = [0, 0, 0]

        if 'ss' in self.args.tasks:
            if data_sample is not None:
                batch_img_metas = [sample.metainfo for sample in data_sample]
            else:
                # No samples: synthesise minimal metas from the input shape.
                batch_img_metas = [
                    dict(
                        ori_shape=x.shape[2:],
                        img_shape=x.shape[2:],
                        pad_shape=x.shape[2:],
                        padding_size=[0, 0, 0, 0])
                ] * x.shape[0]

            seg_logits = self.semsegdecoder.decode_head.predict_by_feat(seg_logits, batch_img_metas)
            outputs[0] = self.semsegdecoder.postprocess_result(seg_logits, data_sample)

        if 'rd' in self.args.tasks:
            rpn_results_list, rescale = self.rotdetdecoder.test_before_roihead(rd, data_sample)
            assert self.rotdetdecoder.roi_head.with_bbox, 'Bbox head must be implemented.'

            batch_img_metas = [sample.metainfo for sample in data_sample]
            proposals, rois = self.rotdetdecoder.roi_head.test_bbox_generate_roi(rpn_results_list)
            rcnn_test_cfg = self.rotdetdecoder.roi_head.test_cfg
            bbox_rescale = rescale if not self.rotdetdecoder.roi_head.with_mask else False

            if rois.shape[0] == 0:
                # No proposals survived the RPN: emit empty results per image.
                results_list = empty_instances(
                    batch_img_metas,
                    rois.device,
                    task_type='bbox',
                    box_type=self.rotdetdecoder.roi_head.bbox_head.predict_box_type,
                    num_classes=self.rd_classes,
                    score_per_cls=rcnn_test_cfg is None)
            else:
                results_list = self.test_rotdet_roi_head_box_head_forward(
                    rd, batch_img_metas, proposals, rois,
                    rcnn_test_cfg, self.rd_classes, bbox_rescale,
                    fc_cls=self.rotdetroiboxhead_fc_cls,
                    fc_reg=self.rotdetroiboxhead_fc_reg)

            outputs[1] = self.rotdetdecoder.add_pred_to_datasample(data_sample, results_list)

        if 'cls' in self.args.tasks:
            cls_pred = self.sceneclshead(e[3]).argmax(dim=1)
            # Bug fix: the original called an undefined `accuracy_fn`
            # (NameError at runtime). Compute plain top-1 accuracy directly.
            outputs[2] = (cls_pred == gt_scene_cls).float().mean().item()

        return outputs

class Residual(nn.Module):
    """Identity skip connection around an arbitrary module: out = fn(x) + x."""

    def __init__(self, fn):
        super().__init__()
        self.fn = fn

    def forward(self, x):
        return self.fn(x) + x

class DefMixer(nn.Module):
    """Stack of ``depth`` mixer stages.

    Each stage is: pointwise conv -> GELU -> BatchNorm, followed by a residual
    branch built from a channel-split deformable conv (``ChlSpl``) with its own
    GELU + BatchNorm.

    Note: ``dim_in`` is accepted for interface compatibility but unused.
    """

    def __init__(self, dim_in, dim, depth=1, kernel_size=1):
        super(DefMixer, self).__init__()

        stages = []
        for _ in range(depth):
            stages.append(nn.Sequential(
                nn.Conv2d(dim, dim, kernel_size=kernel_size),
                nn.GELU(),
                nn.BatchNorm2d(dim),
                Residual(nn.Sequential(
                    ChlSpl(dim, dim, (1, 3), 1, 0),
                    nn.GELU(),
                    nn.BatchNorm2d(dim),
                )),
            ))
        self.blocks = nn.Sequential(*stages)

    def forward(self, x):
        return self.blocks(x)

class ChlSpl(nn.Module):
    """Channel-split deformable pointwise convolution.

    Each input channel is sampled at a spatially shifted location and the
    result is mixed by a 1x1 convolution via torchvision's ``deform_conv2d``.
    The per-pixel shifts are predicted from the input by an ``Offset``
    sub-module.

    Constraints enforced below: ``stride`` must be 1 and ``padding`` 0;
    ``kernel_size`` must be 1-D, i.e. (1, k) or (k, 1), so the static offset
    pattern in ``gen_offset`` is axis-aligned.
    """

    def __init__(
        self,
        in_channels: int,
        out_channels: int,
        kernel_size,
        stride: int = 1,
        padding: int = 0,
        dilation: int = 1,
        groups: int = 1,
        bias: bool = True,
    ):
        super(ChlSpl, self).__init__()

        if in_channels % groups != 0:
            raise ValueError('in_channels must be divisible by groups')
        if out_channels % groups != 0:
            raise ValueError('out_channels must be divisible by groups')
        if stride != 1:
            raise ValueError('stride must be 1')
        if padding != 0:
            raise ValueError('padding must be 0')

        self.in_channels = in_channels
        self.out_channels = out_channels
        # NOTE: kept as passed (a tuple like (1, 3)), not normalised via _pair.
        self.kernel_size = kernel_size
        self.stride = _pair(stride)
        self.padding = _pair(padding)
        self.dilation = _pair(dilation)
        self.groups = groups

        # Pointwise (1x1) mixing weight consumed by deform_conv2d in forward().
        self.weight = nn.Parameter(torch.empty(out_channels, in_channels // groups, 1, 1))

        # Learned, input-dependent offset predictor used in forward().
        self.get_offset = Offset(dim=in_channels, kernel_size=3)

        if bias:
            self.bias = nn.Parameter(torch.empty(out_channels))
        else:
            self.register_parameter('bias', None)
        # Static per-channel offset pattern, registered as a buffer so it moves
        # with .to()/.cuda(). NOTE(review): forward() uses the learned offsets
        # from get_offset instead — this buffer appears unused at runtime.
        self.register_buffer('offset', self.gen_offset())

        self.reset_parameters()

    def reset_parameters(self) -> None:
        # Same scheme nn.Conv2d uses: Kaiming-uniform weight, fan-in-bound bias.
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))

        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
    def gen_offset(self):
        """Build a static (1, 2*in_channels, 1, 1) offset tensor where channel
        ``i`` is shifted along the kernel's single spatial axis by
        ``(i + center) % k - k // 2``; the orthogonal axis offset is 0."""
        offset = torch.empty(1, self.in_channels*2, 1, 1)
        start_idx = (self.kernel_size[0] * self.kernel_size[1]) // 2
        # Only 1-D kernels are supported, see class docstring.
        assert self.kernel_size[0] == 1 or self.kernel_size[1] == 1, self.kernel_size
        for i in range(self.in_channels):
            if self.kernel_size[0] == 1:
                # Horizontal kernel: shift along x only (y offset stays 0).
                offset[0, 2 * i + 0, 0, 0] = 0
                offset[0, 2 * i + 1, 0, 0] = (i + start_idx) % self.kernel_size[1] - (self.kernel_size[1] // 2)
            else:
                # Vertical kernel: shift along y only (x offset stays 0).
                offset[0, 2 * i + 0, 0, 0] = (i + start_idx) % self.kernel_size[0] - (self.kernel_size[0] // 2)
                offset[0, 2 * i + 1, 0, 0] = 0
        return offset

    def forward(self, input: Tensor) -> Tensor:
        """
            input: Tensor[b,c,h,w]
        """
        # Per-pixel offsets predicted from the input itself.
        offset_2 = self.get_offset(input)
        B, C, H, W = input.size()  # NOTE(review): unpacked but unused.

        return deform_conv2d_tv(input, offset_2, self.weight, self.bias, stride=self.stride, padding=self.padding, dilation=self.dilation)

    def extra_repr(self) -> str:
        # Mirrors nn.Conv2d's repr style, omitting default-valued arguments.
        s = self.__class__.__name__ + '('
        s += '{in_channels}'
        s += ', {out_channels}'
        s += ', kernel_size={kernel_size}'
        s += ', stride={stride}'
        s += ', padding={padding}' if self.padding != (0, 0) else ''
        s += ', dilation={dilation}' if self.dilation != (1, 1) else ''
        s += ', groups={groups}' if self.groups != 1 else ''
        s += ', bias=False' if self.bias is None else ''
        s += ')'
        return s.format(**self.__dict__)

class Offset(nn.Module):
    """Predict a per-channel 2-D sampling offset map for ``ChlSpl``.

    ``forward`` returns a (B, 2*dim, H, W) tensor: a zero-initialised conv
    predicts kernel-grid offsets, these are added to the absolute sampling
    grid (``p_0`` + ``p_n``), and the result is projected to ``2*dim``
    channels by a grouped conv.
    """

    def __init__(self, dim, kernel_size):
        super().__init__()
        self.kernel_size = kernel_size
        self.stride = 1
        # Offset predictor; weight zero-initialised so training starts from
        # the undeformed sampling grid (bias is left at its default init).
        self.p_conv = nn.Conv2d(dim, 2*kernel_size*kernel_size, kernel_size=3, padding=1, stride=1)
        nn.init.constant_(self.p_conv.weight, 0)
        # Scale this branch's gradients by 0.1 (common deformable-conv
        # practice). NOTE: register_backward_hook is deprecated; kept as-is
        # because register_full_backward_hook has different grad_input
        # semantics and would change training behaviour.
        self.p_conv.register_backward_hook(self._set_lr)
        self.opt = nn.Conv2d(2*self.kernel_size*self.kernel_size, dim*2, kernel_size=3, padding=1, stride=1, groups=2)

    @staticmethod
    def _set_lr(module, grad_input, grad_output):
        """Backward hook: scale incoming gradients by 0.1.

        Bug fix: the original built generator expressions and returned None,
        so gradients were never actually modified — a backward hook only takes
        effect through its return value (grad_output cannot be replaced).
        """
        return tuple(g * 0.1 if g is not None else g for g in grad_input)

    def _get_p(self, offset, dtype):
        """Absolute sampling positions: base grid + kernel grid + predicted offsets."""
        N, h, w = offset.size(1)//2, offset.size(2), offset.size(3)

        p_n = self._get_p_n(N, dtype)        # (1, 2N, 1, 1) kernel-relative coords
        p_0 = self._get_p_0(h, w, N, dtype)  # (1, 2N, h, w) absolute (1-based) grid
        return p_0 + p_n + offset

    def _get_p_n(self, N, dtype):
        """Relative coordinates of the k x k kernel grid, centred at 0."""
        # meshgrid uses the default 'ij' indexing (newer torch emits a warning).
        p_n_x, p_n_y = torch.meshgrid(
            torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1),
            torch.arange(-(self.kernel_size-1)//2, (self.kernel_size-1)//2+1))
        p_n = torch.cat([torch.flatten(p_n_x), torch.flatten(p_n_y)], 0)
        return p_n.view(1, 2*N, 1, 1).type(dtype)

    def _get_p_0(self, h, w, N, dtype):
        """Absolute 1-based x/y grid of the output feature map, tiled N times."""
        p_0_x, p_0_y = torch.meshgrid(
            torch.arange(1, h*self.stride+1, self.stride),
            torch.arange(1, w*self.stride+1, self.stride))
        p_0_x = torch.flatten(p_0_x).view(1, 1, h, w).repeat(1, N, 1, 1)
        p_0_y = torch.flatten(p_0_y).view(1, 1, h, w).repeat(1, N, 1, 1)
        return torch.cat([p_0_x, p_0_y], 1).type(dtype)

    def forward(self, x):
        offset = self.p_conv(x)
        dtype = offset.data.type()
        p = self._get_p(offset, dtype)  # (B, 2*k*k, H, W) sampling positions
        return self.opt(p)              # project to (B, 2*dim, H, W)

class ConvBNReLU(nn.Module):
    """Conv2d -> optional normalisation -> optional activation, with 'same'
    padding derived from kernel size and dilation.

    With ``bias='auto'`` the conv bias is enabled only when no norm layer is
    used (the norm's own shift makes a conv bias redundant). Pass ``None`` for
    ``norm_layer`` / ``activation_layer`` to skip those stages.
    """

    def __init__(self, in_channels, out_channels, kernel_size=3, stride=1, dilation=1, groups=1,
                 norm_layer=nn.BatchNorm2d, activation_layer=nn.ReLU, bias='auto',
                 inplace=True, affine=True):
        super().__init__()
        same_pad = dilation * (kernel_size - 1) // 2
        self.use_norm = norm_layer is not None
        self.use_activation = activation_layer is not None
        if bias == 'auto':
            bias = not self.use_norm
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size, stride, same_pad,
                              dilation=dilation, groups=groups, bias=bias)
        if self.use_norm:
            self.bn = norm_layer(out_channels, affine=affine)
        if self.use_activation:
            self.activation = activation_layer(inplace=inplace)

    def forward(self, x):
        out = self.conv(x)
        if self.use_norm:
            out = self.bn(out)
        if self.use_activation:
            out = self.activation(out)
        return out



if __name__ == "__main__":
    # Bug fix: the previous bare `TestFrameWork()` call always raised
    # TypeError because the constructor requires `args` and `logger`
    # positionals (and a configured distributed/CUDA environment).
    # Fail fast with an actionable message instead.
    raise SystemExit(
        "TestFrameWork requires `args` and `logger`; "
        "run it through the training/evaluation entry point instead.")










