import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
import numpy as np

from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.structures import Boxes, ImageList, Instances, BitMasks


from .position_encoding import PositionEmbeddingSine
from .maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
from .mix_decoder import MixDecoder

# @TRANSFORMER_DECODER_REGISTRY.register()
class MixInstanceDecoder(nn.Module):
    """Produce per-instance embeddings by cropping per-box regions out of
    backbone feature maps and mixing them with a ``MixDecoder``.

    Expected inputs (from the original author's example, 512x512 source):
        mask_features:            (B, C, H/4,  W/4)   e.g. (10, 256, 128, 128)
        multi_scale_features[0]:  (B, C, H/32, W/32)  e.g. (10, 256, 16, 16)
        multi_scale_features[1]:  (B, C, H/16, W/16)  e.g. (10, 256, 32, 32)
        multi_scale_features[2]:  (B, C, H/8,  W/8)   e.g. (10, 256, 64, 64)

    Boxes are integer ``(x0, y0, x1, y1)`` tensors already scaled to the
    ``mask_features`` (stride-4) resolution.

    NOTE(review): every crop below slices a (C, H, W) map as
    ``[:, box[0]:box[2], box[1]:box[3]]``, i.e. the x-range indexes the H
    axis. For XYXY boxes that looks transposed (``[:, y0:y1, x0:x1]`` would
    be conventional); it is preserved unchanged because callers and any
    trained weights share this convention — confirm upstream before fixing.
    """

    # @configurable
    def __init__(
        self,
        in_channels = 2048,
        mask_classification=True,
    ):
        """
        Args:
            in_channels: channel width handed to the internal MixDecoder.
            mask_classification: accepted for interface compatibility with
                the registry-style constructors; not used by this module.
        """
        super().__init__()

        self.mix_decoder = MixDecoder(
            in_channels=in_channels,
            in_h=14,
            in_w=14,
            out_channels=1024,
            mix_depth=4,
            mlp_ratio=1,
            out_rows=4,
        )
        # Dimensionality of the mixed output feature (MixDecoder contract).
        self.out_feature_dim = 4096

    # @classmethod
    def from_config(cls, cfg):
        """Map a detectron2 config node onto constructor keyword arguments.

        NOTE(review): the returned keys are not consumed by ``__init__``
        above — presumably carried over from the registry-based constructor
        this class was derived from; verify before relying on them.
        """
        ret = {}

        ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
        # Transformer parameters:
        ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
        ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
        ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
        ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
        ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM

        return ret

    def forward(self, mask_features, multi_scale_features, boxes, btensor):
        """Mix box crops for every image in the batch.

        Args:
            mask_features: (B, C, H4, W4) stride-4 feature map.
            multi_scale_features: list of coarser (B, C, h, w) maps.
            boxes: (N, 4) int boxes ``(x0, y0, x1, y1)`` at stride-4 scale.
            btensor: (N,) batch index of each box.

        Returns:
            Mixed features for all boxes, concatenated in batch order; an
            empty tensor (same dtype/device as ``mask_features``) when no
            image contributes a box.
        """
        bs = mask_features.shape[0]
        per_image = []
        for b in range(bs):
            b_box = boxes[torch.where(btensor == b)]
            if len(b_box) < 1:
                continue
            per_image.append(
                self.inference(mask_features, multi_scale_features, b, b_box)
            )
        # Single cat instead of the quadratic cat-in-loop pattern.
        if not per_image:
            return torch.tensor([]).to(mask_features)
        return torch.cat(per_image, dim=0)

    def inference(self, mask_features, multi_scale_features, b_num, b_boxes):
        """Crop and mix the boxes of one image.

        Args:
            b_num: index of the batch entry to process.
            b_boxes: (K, 4) int boxes of that entry, stride-4 scale.
        """
        _, c, _, _ = mask_features.shape
        _, _, h16, w16 = multi_scale_features[1].shape

        mask_feature = mask_features[b_num]
        ms_1_feature = multi_scale_features[1][b_num]
        ms_2_feature = multi_scale_features[2][b_num]

        local_crops = []
        global_crops = []
        for box in b_boxes:
            # Crop at stride 4 and resize to the stride-16 grid size.
            crop = mask_feature[:, box[0]:box[2], box[1]:box[3]]
            crop = F.interpolate(
                crop.unsqueeze(dim=0), size=(h16, w16),
                mode="bilinear", align_corners=False,
            ).squeeze(dim=0)

            # Matching crop on the next coarser map (coordinates halved).
            box2 = box // 2
            crop2 = ms_2_feature[:, box2[0]:box2[2], box2[1]:box2[3]]
            crop2 = F.interpolate(
                crop2.unsqueeze(dim=0), size=(h16, w16),
                mode="bilinear", align_corners=False,
            ).squeeze(dim=0)
            ms_features = crop2

            # Quarter-scale crop only when the shrunken box is still
            # non-degenerate (an empty slice would make interpolate fail).
            box4 = box2 // 2
            if (box4[2] > box4[0]) and (box4[3] > box4[1]):
                crop1 = ms_1_feature[:, box4[0]:box4[2], box4[1]:box4[3]]
                crop1 = F.interpolate(
                    crop1.unsqueeze(dim=0), size=(h16, w16),
                    mode="bilinear", align_corners=False,
                ).squeeze(dim=0)
                ms_features = crop1 + crop2

            # NOTE(review): ms_features is currently unused — the
            # concatenation it was built for is commented out in the
            # original. Kept so the control flow matches the author's
            # apparent intent; confirm whether it should be fed downstream.

            # Fold 2x2 spatial neighbourhoods into channels:
            # (C, h16, w16) -> (4C, h16/2, w16/2); needs h16, w16 even.
            crop = crop.reshape(c * 4, h16 // 2, w16 // 2)
            crop2 = crop2.reshape(c * 4, h16 // 2, w16 // 2)

            local_crops.append(crop.unsqueeze(dim=0))
            global_crops.append(crop2.unsqueeze(dim=0))

        if not local_crops:
            # Mirror the original behaviour of feeding empty tensors.
            return self.mix_decoder.forward_global(
                torch.tensor([]).to(mask_feature),
                torch.tensor([]).to(mask_feature),
            )
        return self.mix_decoder.forward_global(
            torch.cat(local_crops, dim=0), torch.cat(global_crops, dim=0)
        )

    def forward_1(self, mask_features, multi_scale_features, boxes, btensor):
        """Variant of :meth:`forward`: each crop gets the full (uncropped)
        stride-16 map added, then is mixed with ``mix_decoder.forward``
        (no separate global path).
        """
        bs, c, _, _ = mask_features.shape
        _, _, h16, w16 = multi_scale_features[1].shape

        per_image = []
        for b in range(bs):
            b_box = boxes[torch.where(btensor == b)]
            if len(b_box) < 1:
                continue

            mask_feature = mask_features[b]
            ms_1_feature = multi_scale_features[1][b]
            crops = []
            for box in b_box:
                crop = mask_feature[:, box[0]:box[2], box[1]:box[3]]
                crop = F.interpolate(
                    crop.unsqueeze(dim=0), size=(h16, w16),
                    mode="bilinear", align_corners=False,
                ).squeeze(dim=0)
                # Add the whole stride-16 context map to the resized crop.
                crop = crop + ms_1_feature
                crop = crop.reshape(c * 4, h16 // 2, w16 // 2)
                crops.append(crop.unsqueeze(dim=0))
            per_image.append(self.mix_decoder.forward(torch.cat(crops, dim=0)))

        if not per_image:
            return torch.tensor([]).to(mask_features)
        return torch.cat(per_image, dim=0)

    def forward_targets(self, mask_features, multi_scale_features, targets):
        """Like :meth:`forward_1`, but derives boxes from ground-truth masks.

        Args:
            targets: per-image dicts with "masks" (bit masks) and "gt_ids".

        Returns:
            ``(mixed_features, gt_ids)`` — ids cast to the dtype/device of
            ``mask_features``, reproducing the original float promotion
            that happened when concatenating ids into a float accumulator.
        """
        bs, c, _, _ = mask_features.shape
        _, _, h16, w16 = multi_scale_features[1].shape

        per_image = []
        kept_ids = []
        for b in range(bs):
            tgt_masks = targets[b]["masks"]
            tgt_ids = targets[b]["gt_ids"]
            boxes = BitMasks(tgt_masks).get_bounding_boxes()

            # Drop degenerate / too-small boxes and their matching ids.
            valid_box = boxes.nonempty(threshold=8.0)
            boxes = boxes[valid_box]
            tgt_ids = tgt_ids[valid_box]

            if len(boxes) < 1:
                continue

            # Rescale image-space boxes to the stride-4 grid.
            # torch.ceil instead of np.ceil: stays on-device (np.ceil on a
            # CUDA tensor fails) and avoids a torch<->numpy round trip.
            boxes4 = torch.ceil(boxes.tensor / 4).to(torch.int)

            mask_feature = mask_features[b]
            ms_1_feature = multi_scale_features[1][b]
            crops = []
            for box in boxes4:
                crop = mask_feature[:, box[0]:box[2], box[1]:box[3]]
                crop = F.interpolate(
                    crop.unsqueeze(dim=0), size=(h16, w16),
                    mode="bilinear", align_corners=False,
                ).squeeze(dim=0)
                crop = crop + ms_1_feature
                crop = crop.reshape(c * 4, h16 // 2, w16 // 2)
                crops.append(crop.unsqueeze(dim=0))
            per_image.append(self.mix_decoder.forward(torch.cat(crops, dim=0)))
            kept_ids.append(tgt_ids)

        if not per_image:
            empty = torch.tensor([]).to(mask_features)
            return empty, empty.clone()
        return (
            torch.cat(per_image, dim=0),
            torch.cat(kept_ids, dim=0).to(mask_features),
        )
        
        
        
        