from torch import nn
import fvcore.nn.weight_init as weight_init
from util.misc import (NestedTensor, nested_tensor_from_tensor_list,
                       nested_tensor_from_videos_list,
                       accuracy, get_world_size, interpolate,
                       is_dist_avail_and_initialized, inverse_sigmoid)
from transformers import BertTokenizer, BertModel, RobertaModel, RobertaTokenizerFast

from torch.autograd import Variable
import torch

from models.segmentation import Conv2d,get_norm
import torch.nn.functional as F
from .tlggn_mask2former_transformer_decoderv2 import TLGGNMultiScaleMaskedTransformerDecoderv2
import numpy as np
import pickle
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from einops import rearrange, repeat
import copy
from .pixel_decoder import TransformerEncoderPixelDecoder
from .sgdpv1 import SGDPv1
from models.matcher import build_matcher,build_nobox_matcher
from models.criterion import SetCriterion,NoBoxSetCriterion
from models.postprocessors import build_postprocessors
class FeatureResizer(nn.Module):
    """Project embeddings from dimension C1 to C2.

    Applies a linear layer, an optional LayerNorm, and dropout, in that order.
    """

    def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
        super().__init__()
        # Linear projection from the encoder width to the model width.
        self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
        # LayerNorm is optional; the flag is checked at forward time.
        self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)
        self.do_ln = do_ln

    def forward(self, encoder_features):
        """Return the resized (and optionally normalized) features."""
        projected = self.fc(encoder_features)
        normalized = self.layer_norm(projected) if self.do_ln else projected
        return self.dropout(normalized)

class SegHead(nn.Module):
    """Segmentation head combining a pixel decoder, a frame-level transformer
    decoder (Mask2Former-style) and the SGDP video-level decoder.

    Args:
        conv_dim: channel width of the pixel decoder (also used as d_model).
        mask_dim: channel width of the produced mask features.
        num_queries: number of object queries per frame.
        K: SGDP-specific hyperparameter (passed through to SGDPv1).
        dec_layers: number of SGDP decoder layers.
        obj_dec_layers: number of frame-level decoder layers.
        weights_path: path of the pretrained Mask2Former checkpoint to
            initialize the pixel decoder / predictor from (pickle format).
    """

    def __init__(self, conv_dim,
                 mask_dim,
                 num_queries=20,
                 K=5,
                 dec_layers=6,
                 obj_dec_layers=9,
                 weights_path='./weights/model_final_86143f.pkl'):
        super().__init__()
        d_model = conv_dim
        self.weights_path = weights_path
        self.pixel_decoder = TransformerEncoderPixelDecoder(
                d_model=d_model,
                conv_dim=conv_dim,
                mask_dim=mask_dim,
                dim_feedforward=1024,
                dropout=0.0,
                num_feature_levels=3,
                nhead=8,
                enc_n_points=4,
                num_encoder_layers=6)
        self.predictor = TLGGNMultiScaleMaskedTransformerDecoderv2(
            in_channels=256,
            num_classes=1,
            hidden_dim=256,
            num_queries=num_queries,
            nheads=8,
            dim_feedforward=2048,
            dec_layers=obj_dec_layers,
            pre_norm=False,
            mask_dim=256,
            enforce_input_project=False,
        )
        self.sgdp = SGDPv1(
                in_channels=256,
                hidden_dim=256,
                dec_layers=dec_layers,
                conv_dim=256,
                enforce_input_project=True,
                num_classes=1,
                num_queries=num_queries,
                mask_dim=256,
                aux_loss=True,
                K=K)
        self.load_weights()

    def load_weights(self):
        """Initialize pixel decoder and predictor from a pretrained checkpoint.

        Class-embed and query-embed weights of the predictor are intentionally
        skipped (they are task-specific and trained from scratch here).
        """
        # NOTE: pickle.load executes arbitrary code — only load trusted checkpoints.
        with open(self.weights_path, 'rb') as file:
            weights = pickle.load(file)['model']
        pixel_decoder_weights = {
            k.replace("sem_seg_head.pixel_decoder.", ""): torch.tensor(v)
            for k, v in weights.items()
            if k.startswith("sem_seg_head.pixel_decoder")
        }
        predictor_weights = {
            k.replace("sem_seg_head.predictor.", ""): torch.tensor(v)
            for k, v in weights.items()
            if k.startswith("sem_seg_head.predictor")
            and not k.startswith("sem_seg_head.predictor.class_embed")
            and not k.startswith("sem_seg_head.predictor.query_embed")
        }
        # strict=False: missing/unexpected keys (e.g. the skipped embeds and any
        # newly added language-fusion modules) are silently ignored.
        self.pixel_decoder.load_state_dict(pixel_decoder_weights, strict=False)
        self.predictor.load_state_dict(predictor_weights, strict=False)

    def forward(self, ori_srcs, text_sentence_features, text_word_features, lang_mask):
        """Run pixel decoder -> frame decoder -> SGDP and assemble mask logits.

        Returns (frame_outputs, vita_outputs) in training mode, otherwise
        only vita_outputs.
        """
        mask_features, clip_mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder(ori_srcs)
        frame_outputs, query_obj, query_pos, mask_features = self.predictor(
            multi_scale_features, mask_features, clip_mask_features, text_sentence_features, lang_mask)

        mask_features = self.sgdp.vita_mask_features(mask_features)

        vita_outputs = self.sgdp(query_obj, query_pos, text_sentence_features, text_word_features[0], lang_mask)

        # Combine per-query mask embeddings with per-frame mask features:
        # (t, q, c) x (t, c, h, w) -> (t, q, h, w) mask logits.
        vita_outputs["pred_masks"] = torch.einsum(
            "tqc,tchw->tqhw", vita_outputs["pred_mask_embed"], mask_features)
        for out in vita_outputs["aux_outputs"]:
            out["pred_masks"] = torch.einsum("tqc,tchw->tqhw", out["pred_mask_embed"], mask_features)
        if self.training:
            return frame_outputs, vita_outputs
        return vita_outputs

class TLGGNModel(nn.Module):
    """Full TLGGN model: visual backbone + frozen RoBERTa text encoder + seg head.

    Args:
        backbone: visual backbone returning multi-scale NestedTensor features.
        sem_seg_head: segmentation head (see SegHead).
        hidden_dim: transformer model width the text features are resized to.
    """

    def __init__(self, backbone, sem_seg_head, hidden_dim=256):
        super().__init__()
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head

        self.tokenizer = RobertaTokenizerFast.from_pretrained('./weights/roberta')
        self.text_encoder = RobertaModel.from_pretrained('./weights/roberta')

        # The text encoder is kept frozen; only the resizer adapts its output.
        for p in self.text_encoder.parameters():
            p.requires_grad_(False)

        # Resize the RoBERTa output channel (768) to the transformer d_model.
        self.resizer = FeatureResizer(
            input_feat_size=768,
            output_feat_size=hidden_dim,
            dropout=0.1,
        )

    def forward_text(self, captions, device):
        """Encode a batch of caption strings with the frozen text encoder.

        Returns:
            text_features: NestedTensor of word-level features
                [batch_size, length, 768] with padding mask (True = padded).
            text_sentence_features: pooled sentence features resized to
                hidden_dim, [batch_size, hidden_dim].

        Raises:
            ValueError: if captions is not a list of strings.
        """
        if not isinstance(captions[0], str):
            raise ValueError("Please make sure the caption is a list of strings")
        tokenized = self.tokenizer.batch_encode_plus(
            captions, padding="longest", return_tensors="pt").to(device)
        encoded_text = self.text_encoder(**tokenized)
        # attention_mask is 1 for real tokens; invert so True marks padding.
        text_attention_mask = tokenized.attention_mask.ne(1).bool()

        # Word-level features stay at 768 dims; only the pooled sentence
        # features are resized to the transformer width.
        text_features = NestedTensor(encoded_text.last_hidden_state, text_attention_mask)

        text_sentence_features = self.resizer(encoded_text.pooler_output)
        return text_features, text_sentence_features

    def forward(self, samples: NestedTensor, captions, targets):
        """Run the full model on a batch of videos and captions."""
        if not isinstance(samples, NestedTensor):
            samples = nested_tensor_from_videos_list(samples)
        # NOTE(review): decompose() returns (tensors, mask); index [1] is the
        # padding mask — used here only for T (dim 1) and the device, which
        # match the tensors. Confirm this is intentional.
        inputs = samples.decompose()[1]
        t = inputs.size()[1]
        text_features, text_sentence_features = self.forward_text(captions, device=inputs.device)

        text_word_features, lang_mask = text_features.decompose()
        text_word_features = text_word_features.permute(0, 2, 1)
        # Repeat text features along the time axis so each frame sees them.
        text_word_features = text_word_features.repeat(t, 1, 1)
        # Re-invert the mask: True now marks real tokens.
        lang_mask = ~lang_mask
        lang_mask = lang_mask.unsqueeze(dim=-1)
        lang_mask = lang_mask.repeat(t, 1, 1)

        # features (list[NestedTensor]): res2 -> res5, tensors are [B*T, Ci, Hi, Wi]
        features, pos = self.backbone(samples, text_word_features, lang_mask)

        feats_name = ['res2', 'res3', 'res4', 'res5']
        srcs = {}
        for name, feat in zip(feats_name, features):
            src, _ = feat.decompose()
            srcs[name] = src

        outputs = self.sem_seg_head(srcs, text_sentence_features, text_word_features, lang_mask)

        return outputs



def build_tlggnv2(args):
    """Build the TLGGN v2 model, its criterion and postprocessors from args.

    Returns:
        (model, criterion, postprocessors) tuple.
    """
    if args.binary:
        num_classes = 1
    else:
        if args.dataset_file == 'ytvos':
            num_classes = 65
        elif args.dataset_file == 'davis':
            num_classes = 78
        elif args.dataset_file in ('a2d', 'jhmdb'):
            num_classes = 1
        else:
            num_classes = 91  # for coco
    device = torch.device(args.device)
    args.add_lang_fusion = True

    # Backbone — imported lazily so only the selected backbone's deps load.
    if 'video_swin' in args.backbone:
        from models.video_swin_transformer import build_video_swin_backbone
        backbone = build_video_swin_backbone(args)
    elif 'swin' in args.backbone:
        from models.swin_transformer import build_swin_backbone
        backbone = build_swin_backbone(args)
    elif 'convnext' in args.backbone:
        from models.convnext import build_convnext_backbone
        backbone = build_convnext_backbone(args)
    elif 'convmae' in args.backbone:
        from models.convmae import build_convmae_backbone
        backbone = build_convmae_backbone(args)
    else:
        # FIXME(review): build_backbone is not imported anywhere in this file;
        # reaching this branch raises NameError. Import the default backbone
        # builder or fail with an explicit error message.
        backbone = build_backbone(args)

    ### model config ###
    dec_layers = 6
    args.cls_loss_coef = 2
    args.mask_loss_coef = 5
    args.dice_loss_coef = 5

    seg_head = SegHead(
        conv_dim=256,
        mask_dim=256,
        num_queries=args.num_queries,
        K=args.K,
        dec_layers=dec_layers,
    )

    model = TLGGNModel(
        backbone,
        seg_head
    )
    # NOTE: these local imports shadow the module-level
    # models.matcher.build_nobox_matcher — the .losses variant is used here.
    from .losses.tlggn_matcher import build_nobox_matcher
    from .losses.tlggn_criterion import TlggnSetCriterion
    matcher = build_nobox_matcher(args)
    weight_dict = {}
    weight_dict['loss_ce'] = args.cls_loss_coef
    if args.masks:  # always true
        weight_dict['loss_mask'] = args.mask_loss_coef
        weight_dict['loss_dice'] = args.dice_loss_coef
    # Duplicate the loss weights for every auxiliary decoder layer.
    if args.aux_loss:
        aux_weight_dict = {}
        for i in range(dec_layers - 1):
            aux_weight_dict.update({k + f'_{i}': v for k, v in weight_dict.items()})
        weight_dict.update(aux_weight_dict)

    losses = ['labels']
    if args.masks:
        losses += ['masks']
    criterion = TlggnSetCriterion(
            num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=args.eos_coef,
            losses=losses,
            focal_alpha=args.focal_alpha)
    criterion.to(device)

    # Postprocessors: used for coco pretraining, not for rvos.
    postprocessors = build_postprocessors(args, args.dataset_file)
    return model, criterion, postprocessors