import torch
import torch.nn as nn
from mmdet_custom.models.backbones import InternImage, build_norm_layer, build_act_layer
from mmdet_custom.models import DINOHead 


class ALLINONE(nn.Module):
    """Multi-task model: one shared InternImage backbone with per-task heads.

    Supported tasks:
      * ``classification`` — 1x1 conv head + global average pool + linear
        classifier (layout matches InternImage-T/S/B/L/XL; the H/G variants
        use a CLIP projector instead — TODO).
      * ``detection`` — optional ``ChannelMapper`` neck feeding a ``DINOHead``.
      * ``segmentation`` — placeholder, not implemented yet.

    Args:
        config: attribute-style config with ``backbone``, ``cls_model`` and
            ``detection_model`` sections (mm*-style; exact schema defined by
            the surrounding project).
    """

    def __init__(self, config):
        super().__init__()
        self.backbone = InternImage(**config.backbone)

        # ---- classification head ----
        self.num_levels = len(config.backbone.depths)
        # Channel count of the last backbone stage: base channels doubled
        # once per additional level.
        self.num_features = int(config.backbone.channels * 2**(self.num_levels - 1))
        self.cls_scale = 1.5  # widening factor for the pre-classifier conv
        self.conv_head = nn.Sequential(
            nn.Conv2d(self.num_features,
                      int(self.num_features * self.cls_scale),
                      kernel_size=1,
                      bias=False),
            build_norm_layer(int(self.num_features * self.cls_scale), 'BN',
                             'channels_first', 'channels_first'),
            build_act_layer('GELU'))
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        # num_classes <= 0 means "no classifier": pooled features pass through.
        self.head = nn.Linear(int(self.num_features * self.cls_scale),
                              config.cls_model.num_classes) \
            if config.cls_model.num_classes > 0 else nn.Identity()

        # ---- classification criterion ----
        # Local import keeps the timm dependency scoped to construction.
        from timm.loss import LabelSmoothingCrossEntropy, SoftTargetCrossEntropy
        if config.cls_model.aug.mixup > 0.:
            # Smoothing is already folded into the mixup label transform.
            self.criterion = SoftTargetCrossEntropy()
        elif config.cls_model.loss.label_smoothing > 0.:
            self.criterion = LabelSmoothingCrossEntropy(
                smoothing=config.cls_model.loss.label_smoothing)
        else:
            self.criterion = torch.nn.CrossEntropyLoss()

        # ---- detection head ----
        self.has_neck = False
        if config.detection_model.neck is not None:
            from mmdet.models.necks.channel_mapper import ChannelMapper
            self.neck = ChannelMapper(**config.detection_model.neck)
            self.has_neck = True
            # NOTE(review): train_cfg/test_cfg propagation is unrelated to
            # the neck, yet only runs when a neck is configured. Kept as-is
            # for backward compatibility — confirm that neck-less configs
            # already carry train_cfg/test_cfg inside bbox_head.
            config.detection_model.bbox_head.update(
                train_cfg=config.detection_model['train_cfg'])
            config.detection_model.bbox_head.update(
                test_cfg=config.detection_model['test_cfg'])

        self.bbox_head = DINOHead(**config.detection_model['bbox_head'])

    def forward(self,
                img,
                img_metas=None,
                gt_bboxes=None,
                gt_labels=None,
                gt_bboxes_ignore=None,
                task='classification',
                return_loss=False,
                rescale=True):
        """Run the shared backbone once, then dispatch to the task head.

        Args:
            img: input batch tensor, or an mmdet-style ``[tensor]`` wrapper
                (unwrapped below together with ``img_metas``).
            img_metas: per-image meta dicts (detection only).
            gt_bboxes / gt_labels / gt_bboxes_ignore: training targets;
                ``gt_labels`` doubles as class targets for classification.
            task: ``'classification'``, ``'detection'`` or ``'segmentation'``
                (the historical misspelling ``'segmation'`` is still accepted).
            return_loss / rescale: forwarded to the detection head's
                validation path.

        Returns:
            classification: ``(loss, logits)`` — loss is ``0.0`` in eval mode.
            detection: ``(loss, outs, log_vars)`` in training, else the head's
            validation result. Other tasks return ``None``.
        """
        # mmdet test-time wrapping: a single-augmentation list of tensors.
        if isinstance(img, list) and isinstance(img[0], torch.Tensor):
            img = img[0]
            img_metas = img_metas[0][0]

        mtl_features, forward_out = self.backbone.forward_features_seq_out(img)

        if task == "classification":
            x = self.conv_head(forward_out[-1])
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            cls_out = self.head(x)
            # Targets are only expected during training.
            cls_loss = self.criterion(cls_out, gt_labels) if self.training else 0.0
            return cls_loss, cls_out

        if task == "detection":
            if self.has_neck:
                # Skip the first (highest-resolution) stage; the neck maps
                # the remaining feature levels to the head's width.
                mtl_features = self.neck(mtl_features[1:])

            if self.training:
                # outs: outputs_classes, outputs_coords, topk_score, topk_anchor
                dec_loss, outs, log_vars = self.bbox_head.forward_train(
                    img, mtl_features, img_metas, gt_bboxes,
                    gt_labels, gt_bboxes_ignore)
                return dec_loss, outs, log_vars
            return self.bbox_head.forward_val(
                mtl_features, [img], [img_metas],
                rescale=rescale, return_loss=return_loss)

        # Accept the original misspelling for backward compatibility.
        if task in ("segmentation", "segmation"):
            # TODO: segmentation branch not implemented yet.
            pass