import logging
import fvcore.nn.weight_init as weight_init
from typing import Optional
import torch
from torch import nn, Tensor
from torch.nn import functional as F
import numpy as np

from detectron2.config import configurable
from detectron2.layers import Conv2d
from detectron2.structures import Boxes, ImageList, Instances, BitMasks


from .position_encoding import PositionEmbeddingSine
from .maskformer_transformer_decoder import TRANSFORMER_DECODER_REGISTRY
from .mix_decoder import MixDecoder
from .retrieval_global_attention import RetrievalGlobalAttention
from .retrieval_local_attention import RetrievalLocalAttention

# @TRANSFORMER_DECODER_REGISTRY.register()
class InstanceRetrievalDecoder(nn.Module):
    """Box-conditioned retrieval decoder.

    For each box, fuses a global-attention feature and a local-attention
    feature computed over the image's mask features, projects the
    concatenation through a linear layer, and L2-normalizes it, producing
    one embedding vector per box.
    """

    # @configurable
    def __init__(
        self,
        input_channel=256
    ):
        """
        Args:
            input_channel (int): channel count of the incoming mask
                features. When it differs from 256, a 1x1 conv projects
                the features down to 256 before the attention branches.
        """
        super().__init__()

        # Output widths of the global (dG) and local (dL) attention branches.
        dG = 1024
        dL = 1024
        self.global_attn = RetrievalGlobalAttention(256)
        self.local_attn = RetrievalLocalAttention(out_channels=dL)
        # NOTE(review): 4096 does not match dL + dG (= 2048), which is the
        # width feature_proj actually emits — confirm the intended value.
        self.out_feature_dim = 4096
        self.input_proj = None
        if input_channel != 256:
            self.input_proj = nn.Conv2d(input_channel, 256, kernel_size=1)

        self.feature_proj = nn.Linear(dL + dG, dL + dG)

    # @classmethod
    def from_config(cls, cfg):
        """Collect constructor kwargs from a detectron2 config node.

        NOTE(review): currently unused by __init__ (the @configurable
        decorator above is commented out) and the returned keys do not
        match __init__'s parameters — presumably inherited from a
        MaskFormer decoder; verify before re-enabling @configurable.
        """
        ret = {}

        ret["num_classes"] = cfg.MODEL.SEM_SEG_HEAD.NUM_CLASSES
        ret["hidden_dim"] = cfg.MODEL.MASK_FORMER.HIDDEN_DIM
        ret["num_queries"] = cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES
        # Transformer parameters:
        ret["nheads"] = cfg.MODEL.MASK_FORMER.NHEADS
        ret["dropout"] = cfg.MODEL.MASK_FORMER.DROPOUT
        ret["dim_feedforward"] = cfg.MODEL.MASK_FORMER.DIM_FEEDFORWARD
        ret["enc_layers"] = cfg.MODEL.MASK_FORMER.ENC_LAYERS
        ret["dec_layers"] = cfg.MODEL.MASK_FORMER.DEC_LAYERS
        ret["pre_norm"] = cfg.MODEL.MASK_FORMER.PRE_NORM
        ret["deep_supervision"] = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        ret["enforce_input_project"] = cfg.MODEL.MASK_FORMER.ENFORCE_INPUT_PROJ
        ret["mask_dim"] = cfg.MODEL.SEM_SEG_HEAD.MASK_DIM

        return ret

    def _fuse(self, mask_feature, boxes):
        """Fuse local+global attention features for one image's boxes.

        Expects mask_feature to already be 256-channel (input_proj applied
        by the caller when needed). Returns an L2-normalized embedding per
        box with width dL + dG.
        """
        g_features = self.global_attn.inference(mask_feature, boxes)
        loc_features = self.local_attn.inference(mask_feature, boxes)
        zzf = torch.cat([loc_features, g_features], dim=-1)
        zzf = self.feature_proj(zzf)
        return F.normalize(zzf, p=2, dim=-1)

    def forward(self, mask_features, boxes, btensor):
        """Compute retrieval embeddings for all boxes in a batch.

        Args:
            mask_features: (B, C, H, W) feature map.
                Example (512x512 source images):
                    features:res2 10, 256, 128, 128
                    features:res3 10, 512, 64, 64
                    features:res4 10, 1024, 32, 32
                    features:res5 10, 2048, 16, 16
                    mask_features: 10, 256, 128, 128
                    transformer_encoder_features: 10, 256, 16, 16
                    multi_scale_features[0]: 10, 256, 16, 16
                    multi_scale_features[1]: 10, 256, 32, 32
                    multi_scale_features[2]: 10, 256, 64, 64
            boxes: box tensor; rows are x0, y0, x1, y1 (int tensor
                b,id,x0,y0,x1,y1 per the original notes — TODO confirm
                exact layout against the caller).
            btensor: per-box batch index, parallel to ``boxes``.

        Returns:
            Concatenated embeddings for every box in the batch, or an
            empty 1-D tensor when no image contributes any box.
        """
        if self.input_proj is not None:
            mask_features = self.input_proj(mask_features)
        bs = mask_features.shape[0]
        # Collect per-image results and concatenate once at the end —
        # avoids the O(n^2) repeated-torch.cat accumulation pattern.
        per_image = []
        for bnum in range(bs):
            zzf = self.inference(mask_features, boxes, btensor, bnum)
            if zzf is not None:
                per_image.append(zzf)
        if not per_image:
            # Preserve the original empty result: a 1-D empty tensor with
            # the device/dtype of mask_features.
            return torch.empty(0, device=mask_features.device, dtype=mask_features.dtype)
        return torch.cat(per_image, dim=0)

    def inference(self, mask_features, boxes, btensor, bnum):
        """Embeddings for the boxes of image ``bnum``; None when it has none.

        Assumes input_proj (if any) was already applied by the caller.
        """
        b_index = torch.where(btensor == bnum)
        b_box = boxes[b_index]
        if len(b_box) < 1:
            return None
        # [[bnum]] keeps the batch dim: (1, C, H, W).
        mask_feature = mask_features[[bnum]]
        return self._fuse(mask_feature, b_box)

    def inference_one(self, mask_feature, boxes):
        """Embeddings for a single image's mask feature and boxes.

        Unlike ``inference``, applies input_proj here since the caller
        passes raw features directly.
        """
        if len(boxes) < 1:
            return None
        if self.input_proj is not None:
            mask_feature = self.input_proj(mask_feature)
        return self._fuse(mask_feature, boxes)