# Copyright (c) Tencent Inc. All rights reserved.
import itertools
from typing import List, Sequence, Tuple
import torch
from torch import Tensor, nn
from torch.nn.modules.batchnorm import _BatchNorm
from mmengine.model import BaseModule
from mmyolo.registry import MODELS
import torch.nn.functional as F
from mmdet.utils import OptMultiConfig, ConfigType
from transformers import AutoTokenizer, AdamW, get_linear_schedule_with_warmup, Blip2Processor, Blip2Model
from transformers import (AutoTokenizer, AutoModel, CLIPTextConfig,CLIPModel, AutoConfig, AutoModelForMaskedLM, AutoTokenizer, CLIPTextModel, RobertaModel)
from transformers import CLIPTextModelWithProjection as CLIPTP
from transformers import AutoTokenizer, AutoModel, BeitConfig, BeitModel, XLMRobertaTokenizer, AlignTextConfig, AlignTextModel, AltCLIPTextModel , AltCLIPTextConfig
from typing import List, Sequence, Dict , Optional


# Module-level handle used to hand image features from the vision branch to
# HuggingCLIPCocoOpLanguageBackbonev1.forward (read there as image_features).
# NOTE(review): despite the name, the value consumed downstream looks like a
# feature tensor/tuple, not a model — confirm which code assigns it.
global_image_model = None


@MODELS.register_module()
class HuggingVisionBackbone(BaseModule):
    """Vision backbone wrapping a HuggingFace image model.

    Args:
        model_name (str): HuggingFace hub id or local path of the image model.
        out_indices (Sequence[int]): Indices of the hidden states returned as
            the multi-scale feature pyramid.
        norm_eval (bool): Keep BatchNorm layers in eval mode during training.
        frozen_modules (Sequence[str]): Name prefixes of submodules to freeze.
        init_cfg: mmengine initialization config.
    """

    def __init__(self,
                 model_name: str,
                 out_indices: Sequence[int] = (0, 1, 2, 3),
                 norm_eval: bool = True,
                 frozen_modules: Sequence[str] = (),
                 init_cfg: OptMultiConfig = None) -> None:

        super().__init__(init_cfg=init_cfg)

        self.norm_eval = norm_eval
        self.frozen_modules = frozen_modules
        # Bug fix: ``out_indices`` was never stored, so forward() crashed on
        # the undefined attribute ``self.image_out_indices``.
        self.out_indices = out_indices
        self.model = AutoModel.from_pretrained(model_name)

        self._freeze_modules()

    def forward(self, image: Tensor) -> Tuple[Tensor]:
        """Return the hidden states selected by ``self.out_indices``."""
        # Bug fix: the encoder is stored as ``self.model``; the original code
        # referenced the undefined attribute ``self.image_model``.
        encoded_dict = self.model(pixel_values=image,
                                  output_hidden_states=True)
        hidden_states = encoded_dict.hidden_states
        # Some models (e.g. Swin) expose spatially reshaped states under this
        # key; fall back to the plain hidden states otherwise.
        img_feats = encoded_dict.get('reshaped_hidden_states', hidden_states)
        img_feats = [img_feats[i] for i in self.out_indices]
        return tuple(img_feats)

    def _freeze_modules(self):
        """Set eval mode and disable grads for every submodule whose name
        starts with one of ``self.frozen_modules``."""
        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        """Enter train/eval mode while re-applying freezes and, optionally,
        keeping BatchNorm in eval mode."""
        super().train(mode)
        self._freeze_modules()
        if mode and self.norm_eval:
            for m in self.modules():
                # trick: eval have effect on BatchNorm only
                if isinstance(m, _BatchNorm):
                    m.eval()

@MODELS.register_module()
class HuggingCLIPLanguageBackbone(BaseModule):
    """Text backbone built on a HuggingFace CLIP text encoder with a
    projection head (``CLIPTextModelWithProjection``)."""

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False,
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)

        prompt_length: int = 10
        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        text_config = CLIPTextConfig.from_pretrained(model_name,
                                                     attention_dropout=dropout)
        self.model = CLIPTP.from_pretrained(model_name, config=text_config)

        # Soft-prompt learner; NOTE: its constructor also moves ``self.model``
        # onto the learner's device.
        self.prompt_learner = PromptLearnerV2(self.model, self.tokenizer,
                                              prompt_length)

        self._freeze_modules()

    def forward_tokenizer(self, texts):
        # Tokenize the first batch only and cache the result for all later
        # calls (valid when every batch shares the same class texts).
        if not hasattr(self, 'text'):
            flat_texts = list(itertools.chain(*texts))
            tokenized = self.tokenizer(text=flat_texts, return_tensors='pt',
                                       padding=True)
            self.text = tokenized.to(device=self.model.device)
        return self.text

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode per-image class texts into L2-normalized embeddings of
        shape ``(batch, num_texts_per_image, embed_dim)``."""
        counts = [len(per_image) for per_image in text]
        assert max(counts) == min(counts), (
            'number of sequences not equal in batch')
        flat_texts = list(itertools.chain(*text))
        tokens = self.tokenizer(text=flat_texts, return_tensors='pt',
                                padding=True).to(device=self.model.device)
        embeds = self.model(**tokens).text_embeds
        embeds = embeds / embeds.norm(p=2, dim=-1, keepdim=True)
        return embeds.reshape(-1, counts[0], embeds.shape[-1])

    def _freeze_modules(self):
        # Nothing requested to freeze.
        if not self.frozen_modules:
            return
        # "all" freezes the entire text model.
        if self.frozen_modules[0] == "all":
            self.model.eval()
            for _, submodule in self.model.named_modules():
                submodule.eval()
                for param in submodule.parameters():
                    param.requires_grad = False
            return
        # Otherwise freeze every submodule whose name matches a prefix.
        for name, submodule in self.model.named_modules():
            if any(name.startswith(prefix) for prefix in self.frozen_modules):
                submodule.eval()
                for param in submodule.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        super().train(mode)
        self._freeze_modules()

# ================== Conditional Prompt Learning for Vision-Language Models ================== # 

class PromptLearnerV2(nn.Module):
    """Learned soft-prompt module for a CLIP text encoder.

    Mean-pools the CLIP token embeddings of each (templated) description,
    fuses them with a learnable prompt embedding through a linear projection,
    and returns L2-normalized features grouped per image.
    """

    def __init__(self, clip_model, tokenizer, prompt_length=10, template="{}",
                 device='cuda:0'):
        super().__init__()
        # Device the whole module runs on (GPU 0 by default).
        # NOTE: this also moves the caller's ``clip_model`` in place.
        self.device = device
        self.clip_model = clip_model.to(self.device)
        self.tokenizer = tokenizer
        self.prompt_length = prompt_length
        self.template = template

        # Embedding dimension from the CLIP text config (typically 512).
        embed_dim = clip_model.config.hidden_size
        # Learnable prompt tokens, one row per prompt position.
        self.prompt_embeddings = nn.Parameter(
            torch.randn(prompt_length, embed_dim).to(self.device))

        # Projects the concatenated [text ; prompt] features back to embed_dim.
        self.projection = nn.Linear(embed_dim * 2, embed_dim).to(self.device)

    def forward(self, text_desc, num_per_batch=None):
        """Encode ``text_desc`` and fuse it with the learned prompt.

        Args:
            text_desc: Flat list of text descriptions.
            num_per_batch (int, optional): Descriptions per image, used to
                reshape the output to ``(-1, num_per_batch, embed_dim)``.
                Defaults to ``len(text_desc)`` (one group), so callers that
                only pass the flat list keep working.

        Returns:
            torch.Tensor: Normalized float features of shape
            ``(-1, num_per_batch, embed_dim)``.
        """
        batch_size = len(text_desc)
        # Backward-compatible generalization: this argument used to be
        # required, and omitting it raised
        # ``TypeError: forward() missing 1 required positional argument``.
        np_batch = batch_size if num_per_batch is None else num_per_batch

        # Insert each description into the template.
        prompts = [self.template.format(desc) for desc in text_desc]

        # Tokenize the prompts and move the tensors onto our device.
        text_tokens = self.tokenizer(prompts, return_tensors='pt',
                                     padding=True, truncation=True)
        text_tokens = {key: val.to(self.device)
                       for key, val in text_tokens.items()}

        # Mean-pooled token embeddings from the CLIP text encoder
        # (no gradients flow into the encoder here): [batch_size, embed_dim].
        with torch.no_grad():
            text_features = self.clip_model(
                **text_tokens).last_hidden_state.mean(dim=1)

        # Average the learned prompt positions and broadcast to the batch:
        # [batch_size, embed_dim].
        prompt_features = self.prompt_embeddings.unsqueeze(0).expand(
            batch_size, -1, -1).mean(dim=1).to(self.device)

        # Fuse text and prompt features: [batch_size, 2 * embed_dim].
        combined_features = torch.cat([text_features, prompt_features], dim=1)

        # Project back to the embedding dimension: [batch_size, embed_dim].
        combined_features = self.projection(combined_features)

        # L2-normalize the features.
        combined_features = combined_features / combined_features.norm(
            p=2, dim=-1, keepdim=True)

        # Group per image: [-1, np_batch, embed_dim].
        combined_features = combined_features.view(
            -1, np_batch, combined_features.shape[-1])

        return combined_features.float()

@MODELS.register_module()
class HuggingCLIPCocoOpLanguageBackbone(BaseModule):
    """CLIP text backbone whose output features come from a learned
    soft-prompt module (``PromptLearnerV2``) instead of the raw CLIP
    projection output.

    Args:
        model_name (str): HuggingFace hub id or local path of the CLIP
            checkpoint.
        frozen_modules (Sequence[str]): Name prefixes of submodules to
            freeze; ``("all",)`` freezes the whole text model.
        dropout (float): Attention dropout applied to the CLIP text config.
        training_use_cache (bool): Kept for interface parity with the other
            text backbones in this file (not used here).
        init_cfg: mmengine initialization config.
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False,
                 init_cfg: OptMultiConfig = None) -> None:

        super().__init__(init_cfg=init_cfg)

        prompt_length: int = 20
        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Tokenizer and CLIP text model initialization.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        clip_config = CLIPTextConfig.from_pretrained(model_name,
                                                     attention_dropout=dropout)
        self.model = CLIPTP.from_pretrained(model_name, config=clip_config)

        # Soft-prompt learner; NOTE: its constructor also moves
        # ``self.model`` onto the learner's device.
        self.prompt_learner = PromptLearnerV2(self.model, self.tokenizer,
                                              prompt_length)

        # Freeze specified modules.
        self._freeze_modules()

    def _freeze_modules(self):
        """Freeze specific layers in the model (or all of them)."""
        if len(self.frozen_modules) == 0:
            return
        if self.frozen_modules[0] == "all":
            self.model.eval()
            for _, module in self.model.named_modules():
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False
            return
        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def forward_tokenizer(self, texts):
        # Tokenize once and cache; subsequent calls reuse the first batch's
        # tokens (only valid when every batch uses the same class texts).
        if not hasattr(self, 'text'):
            text = list(itertools.chain(*texts))
            text = self.tokenizer(text=text, return_tensors='pt', padding=True)
            self.text = text.to(device=self.model.device)
        return self.text

    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """Encode a batch of per-image class texts.

        Args:
            text: ``text[i]`` is the list of class descriptions for image
                ``i``; all inner lists must have equal length.

        Returns:
            Prompt-augmented text features of shape
            ``(batch, num_texts_per_image, embed_dim)``.
        """
        # Flatten the batch of text descriptions.
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), \
            'Number of sequences not equal in batch'
        text_flattened = list(itertools.chain(*text))
        # Bug fix: ``PromptLearnerV2.forward`` needs the per-image class
        # count to reshape its output; it was previously called without it,
        # raising ``TypeError: forward() missing 1 required positional
        # argument: 'num_per_batch'``.
        text_features = self.prompt_learner(text_flattened, num_per_batch[0])

        return text_features

    def train(self, mode=True):
        super().train(mode)
        self._freeze_modules()


################### CoCoOp V1 ###################

class PromptLearner(nn.Module):
    """Generates instance-conditioned prompts (CoCoOp-style).

    A learnable context matrix ``ctx`` of shape ``(n_ctx, ctx_dim)`` is
    shifted by a per-instance bias that a small ``meta_net`` MLP produces
    from image features.
    """

    def __init__(self, context_length: int, classnames: List[str],
                 embed_dim: int, vis_dim: int):
        super().__init__()
        self.n_ctx = context_length
        self.ctx_dim = embed_dim
        self.vis_dim = vis_dim
        # NOTE(review): hard-coded class count; ``classnames`` is currently
        # unused — confirm intended.
        self.n_cls = 10

        # Learnable context vectors (created on cuda:0).
        self.ctx = nn.Parameter(torch.randn(self.n_ctx, self.ctx_dim).to('cuda:0'))
        nn.init.normal_(self.ctx, std=0.02)

        # meta_net is built (and possibly rebuilt) dynamically so it can
        # track the incoming feature size.
        self.meta_net = None
        self._create_meta_net(self.vis_dim)

    def _create_meta_net(self, input_dim: int):
        """Build the bias MLP for the given input feature width."""
        # Cap the width to bound memory usage.
        # NOTE(review): if the actual features are wider than this cap, the
        # first Linear layer will not match the input at runtime — confirm.
        max_input_dim = 256
        if input_dim > max_input_dim:
            input_dim = max_input_dim

        self.meta_net = nn.Sequential(
            nn.Linear(input_dim, input_dim // 2),
            nn.ReLU(inplace=True),
            nn.Linear(input_dim // 2, self.ctx_dim)
        ).to('cuda:0')

    def forward(self, im_features: torch.Tensor) -> torch.Tensor:
        """
        Generate image-conditioned prompts.

        Args:
            im_features (torch.Tensor): Image features [batch_size, vis_dim]
        Returns:
            torch.Tensor: Instance-conditioned prompts [batch_size, n_ctx, ctx_dim]
        """
        # All tensors are forced onto cuda:0, matching __init__.
        device = 'cuda:0'
        im_features = im_features.to(device)

        # Bug fix: the original did ``self.ctx = self.ctx.to(device)``.
        # When ``.to`` performs a real device copy it returns a plain tensor,
        # and assigning that to a registered nn.Parameter attribute raises a
        # TypeError in nn.Module.__setattr__ (and would detach the parameter
        # from the optimizer).  Use a local view instead.
        ctx = self.ctx.to(device)

        # Flatten trailing dims so meta_net sees [batch_size, vis_dim].
        if len(im_features.shape) > 2:
            im_features = im_features.view(im_features.size(0), -1)

        # Rebuild meta_net if the incoming feature width changed.
        # NOTE(review): rebuilding discards any trained meta_net weights —
        # behavior preserved from the original, flagged for review.
        if im_features.shape[1] != self.vis_dim:
            self.vis_dim = im_features.shape[1]
            self._create_meta_net(self.vis_dim)

        # Generate a per-instance bias from the image features.
        bias = self.meta_net(im_features)          # [batch_size, ctx_dim]
        bias = bias.unsqueeze(1)                   # [batch_size, 1, ctx_dim]
        # Shift the shared context by the instance bias.
        conditioned_ctx = ctx.unsqueeze(0) + bias  # [batch_size, n_ctx, ctx_dim]
        return conditioned_ctx

@MODELS.register_module() # Testing In Progress >>>>>> This is not working Error: TypeError: forward() missing 1 required positional argument: 'image_features'
class HuggingCLIPCocoOpLanguageBackbonev1(nn.Module):
    """Experimental CoCoOp-style CLIP text backbone.

    Encodes batched class texts with a CLIP text model and mixes in
    conditional prompt features from ``PromptLearnerV2``.  Image features are
    read from the module-level ``global_image_model`` variable rather than
    being passed as an argument.
    NOTE(review): marked "Testing In Progress" by the author — see the
    review notes inside forward().
    """

    def __init__(self,
                 model_name: str,
                #  classnames: List[str],
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.1,
                 context_length: int = 16,
                 training_use_cache: bool = False,
                 init_cfg: Optional[dict] = None) -> None:
        super().__init__()

        # NOTE(review): a JSON path, not a list of names; it is never read in
        # this class (the PromptLearner that used it is commented out below).
        classnames = "data/texts/coco_class_texts.json"
        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache
        self.context_length = context_length

        # Tokenizer and model initialization for text model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        clip_config = CLIPTextConfig.from_pretrained(model_name, attention_dropout=dropout)
        self.model = CLIPTP.from_pretrained(model_name, config=clip_config)

        prompt_length: int = 10

        # Linear transformation to align context and text sequence lengths
        # self.context_to_text_projection = nn.Linear(clip_config.hidden_size, clip_config.hidden_size)
        # Prompt learner initialization

        embed_dim = clip_config.hidden_size  # Text embedding dimension
        vis_dim = clip_config.hidden_size    # Same dimension for simplicity
        # print("context_length :::::::", context_length )
        # print("classnames :::::::", classnames )
        # print("embed_dim :::::::", embed_dim )
        # print("vis_dim :::::::", vis_dim )
        # self.prompt_learner = PromptLearner(context_length, classnames, embed_dim, vis_dim)
        # NOTE: PromptLearnerV2's constructor also moves ``self.model`` to its
        # device (cuda:0 by default).
        self.prompt_learner = PromptLearnerV2(self.model, self.tokenizer, prompt_length)

        # Freeze specific modules
        self._freeze_modules()

    def _freeze_modules(self):
        """Freeze specific layers in the model."""
        if len(self.frozen_modules) == 0:
            return
        if self.frozen_modules[0] == "all":
            self.model.eval()
            for _, module in self.model.named_modules():
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False
            return
        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def forward_tokenizer(self, texts):
        # Tokenize once and cache; later calls reuse the first batch's tokens
        # (only valid when every batch carries the same class texts).
        if not hasattr(self, 'text'):
            text = list(itertools.chain(*texts))
            text = self.tokenizer(text=text, return_tensors='pt', padding=True)
            self.text = text.to(device=self.model.device)
        return self.text


    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """
        Forward pass for text encoding with conditional prompts.

        Args:
            text (List[List[str]]): Input text; all inner lists must have
                equal length.

        Returns:
            torch.Tensor: The prompt features from ``PromptLearnerV2``.
            NOTE(review): ``txt_feats`` is computed and combined below but the
            method returns ``conditional_prompts`` — confirm this is intended.
        """
        # Image features arrive via the module-level global, not an argument.
        global global_image_model
        image_features = global_image_model

        # print("image_features ::::: ", image_features)
        # print("ENDDDDDDDDDDDDDDDDDDDDDd")
        # Tokenize text
        tokenized = self.forward_tokenizer(text)
        # print(f"Tokenized text shape: {tokenized.shape}")
        # Ensure image_features is the correct shape
        if isinstance(image_features, tuple):
            image_features = image_features[0]  # Unpack the tuple if necessary

        if len(image_features.shape) > 2:
            # Apply adaptive pooling if image features are more than 2D
            image_features = F.adaptive_avg_pool2d(image_features, (1, 1))  # Apply adaptive pooling
            image_features = image_features.view(image_features.size(0), -1)  # Flatten to [batch_size, vis_dim]
        # print(f"image_features shape: {image_features.shape}")
        # Generate instance-conditioned prompts from image features
        # conditional_prompts = self.prompt_learner(image_features)  # [batch_size, n_ctx, ctx_dim]

        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), 'Number of sequences not equal in batch'
        text_flattened = list(itertools.chain(*text))
        # print("text_flattened :::::: ", text_flattened)
        # Generate features using PromptLearner
        conditional_prompts = self.prompt_learner(text_flattened, num_per_batch[0])  # Augmented text features
        
        # print(f"conditional_prompts_image_features shape: {conditional_prompts.shape}")
        # Pass the tokenized text through the model
        # NOTE(review): ``image_features`` is pooled above but never used past
        # this point (the conditioning call is commented out) — confirm.
        txt_outputs = self.model(**tokenized)

        # Extract the last hidden state, which is the text embedding
        txt_feats = txt_outputs.last_hidden_state  # [batch_size, sequence_length, hidden_size]

        # Normalize text features (epsilon guards against a zero norm)
        txt_feats = txt_feats / (txt_feats.norm(p=2, dim=-1, keepdim=True) + 1e-6)
        # print(f"txt_feats shape: {txt_feats.shape}")

        # Get sequence length from text embeddings
        sequence_length = txt_feats.size(1)  # Length of the tokenized sequence
        # print(f"sequence_length : {sequence_length}")

        # Step 1: Adjust `conditional_prompts` to match the sequence length of `txt_feats`
        if conditional_prompts.size(1) != sequence_length:
            # Adjust the `conditional_prompts` by using interpolation (resize it to match the sequence length)
            # We'll use `F.interpolate` to resize the sequence dimension of `conditional_prompts`.
            
            # Get the current sequence length of conditional prompts
            prompt_sequence_length = conditional_prompts.size(1)

            # Resizing the `conditional_prompts` tensor to match the sequence length of `txt_feats`
            # We will resize it by interpolating along dimension 1 (sequence length)
            conditional_prompts = F.interpolate(
                conditional_prompts.permute(0, 2, 1),  # Swap sequence and feature dimensions
                size=(sequence_length),  # New size for the sequence dimension
                mode='linear',  # Interpolation mode: 'linear' is often used for this purpose
                align_corners=False
            ).permute(0, 2, 1)  # Swap back to original shape [batch_size, sequence_length, ctx_dim]

        # Step 3: Broadcast conditional_prompts to match the batch size of txt_feats
        if txt_feats.size(0) != conditional_prompts.size(0):
            # Repeat conditional_prompts across the batch dimension to match txt_feats' batch size
            # txt_feats = txt_feats.repeat(conditional_prompts.size(0), 1, 1)
            # NOTE(review): truncation silently drops trailing batch entries
            # when txt_feats has more rows than conditional_prompts — confirm.
            txt_feats = txt_feats[:conditional_prompts.size(0), :, :]
        
        # print(f"conditional_prompts after resizing: {conditional_prompts.shape}")

        # NOTE(review): this sum is discarded — the method returns
        # ``conditional_prompts`` below, making the txt_feats pipeline dead
        # code.  Possibly ``return txt_feats`` was intended — confirm.
        txt_feats = txt_feats + conditional_prompts
        # print(f"txt_feats after add conditional_prompts: {txt_feats.shape}")

        return conditional_prompts

    def train(self, mode=True):
        """Switch between train and eval modes."""
        super().train(mode)
        self._freeze_modules()


# ================== Conditional Prompt Learning for Vision-Language Models ================== # 





# path: models/hugging_align_language_backbone.py it's working normally

# @MODELS.register_module()
# class HuggingALIGNLanguageBackbone(nn.Module):
#     def __init__(
#         self,
#         model_name: str,
#         frozen_modules: Sequence[str] = (),
#         dropout: float = 0.0,
#         target_hidden_size: int = 512,
#         training_use_cache: bool = False,
#     ) -> None:
#         super().__init__()
#         if not model_name:
#             raise ValueError("model_name must be a non-empty string.")

#         self.frozen_modules = frozen_modules
#         self.training_use_cache = training_use_cache

#         # Initialize ALIGN Tokenizer and Model
#         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
#         self.config = AlignTextConfig.from_pretrained(
#             model_name, attention_dropout=dropout
#         )
#         self.model = AlignTextModel.from_pretrained(model_name, config=self.config)

#         # Projection layer for hidden size adjustment
#         self.projection = nn.Linear(self.config.hidden_size, target_hidden_size)

#         # Ensure frozen modules remain frozen
#         self._freeze_modules()

#     def forward_tokenizer(self, texts: List[List[str]]) -> Tensor:
#         """
#         Tokenizes input text using ALIGN tokenizer.
        
#         Args:
#             texts (List[List[str]]): Batch of text inputs.
        
#         Returns:
#             Tensor: Tokenized inputs on the same device as the model.
#         """
#         if not all(isinstance(sublist, list) for sublist in texts):
#             raise ValueError("Input texts must be a list of lists of strings.")

#         flat_text = list(itertools.chain.from_iterable(texts))
#         tokens = self.tokenizer(
#             text=flat_text,
#             return_tensors="pt",
#             padding=True,
#             truncation=True,
#             max_length=512,
#         )
#         return tokens.to(self.model.device)

#     def forward(self, text: List[List[str]]) -> Tensor:
#         """
#         Encodes text using ALIGN and processes embeddings.
        
#         Args:
#             text (List[List[str]]): Batch of text inputs.
        
#         Returns:
#             Tensor: Processed embeddings reshaped to batch format.
#         """
#         if not text or not all(isinstance(t, list) for t in text):
#             raise ValueError("Input text must be a non-empty list of lists.")

#         num_per_batch = [len(t) for t in text]
#         if max(num_per_batch) != min(num_per_batch):
#             raise ValueError("All batch sequences must have the same length.")

#         tokens = self.forward_tokenizer(text)

#         # Forward pass through ALIGN model
#         outputs = self.model(**tokens)
#         hidden_states = outputs.last_hidden_state

#         # CLS token embeddings and normalization
#         cls_embeddings = hidden_states[:, 0, :]  # CLS token
#         cls_embeddings = F.normalize(cls_embeddings, p=2, dim=-1)

#         # Apply projection layer for dimensionality adjustment
#         projected_embeddings = self.projection(cls_embeddings)

#         # Reshape embeddings back to batch format
#         batch_size = len(text)
#         sequence_length = len(text[0])
#         reshaped_embeddings = projected_embeddings.view(
#             batch_size, sequence_length, projected_embeddings.size(-1)
#         )

#         return reshaped_embeddings

#     def _freeze_modules(self):
#         """Freezes specific or all modules of the model."""
#         if not self.frozen_modules:
#             return

#         if "all" in self.frozen_modules:
#             self.model.eval()
#             for param in self.model.parameters():
#                 param.requires_grad = False
#             return

#         # Freeze specified modules
#         for name, module in self.model.named_modules():
#             for frozen_name in self.frozen_modules:
#                 if name.startswith(frozen_name):
#                     module.eval()
#                     for param in module.parameters():
#                         param.requires_grad = False
#                     break

#     def train(self, mode: bool = True):
#         """
#         Ensures frozen modules remain frozen during training.
        
#         Args:
#             mode (bool): Whether to set the model to training mode.
#         """
#         super().train(mode)
#         self._freeze_modules()


# ============================ NEEDS MORE TESTING ============================

# class SPGGenerator(nn.Module):
#     """Generative model for producing soft prompts."""
#     def __init__(self, input_dim: int, output_dim: int, hidden_dim: int = 512):
#         super().__init__()
#         self.fc1 = nn.Linear(input_dim, hidden_dim)
#         self.fc2 = nn.Linear(hidden_dim, output_dim)

#     def forward(self, x: Tensor) -> Tensor:
#         x = F.relu(self.fc1(x))
#         return self.fc2(x)

# class SPGDiscriminator(nn.Module):
#     """Discriminator for evaluating prompt authenticity."""
#     def __init__(self, input_dim: int, hidden_dim: int = 512):
#         super().__init__()
#         self.fc1 = nn.Linear(input_dim, hidden_dim)
#         self.fc2 = nn.Linear(hidden_dim, 1)

#     def forward(self, x: Tensor) -> Tensor:
#         x = F.relu(self.fc1(x))
#         return torch.sigmoid(self.fc2(x))


@MODELS.register_module()
class HuggingALIGNLanguageBackbone(nn.Module):
    """Text backbone built on a pre-trained ALIGN text encoder.

    Encodes each caption into an L2-normalized CLS embedding, projects it
    to ``target_hidden_size`` (optionally through a second projection and a
    multi-head attention layer), and reshapes the result back to
    ``[batch, num_captions, target_hidden_size]``.
    """

    def __init__(
        self,
        model_name: str,
        frozen_modules: Sequence[str] = (),
        dropout: float = 0.0,
        target_hidden_size: int = 512,
        training_use_cache: bool = False,
        use_attention_layer: bool = False,  # refine embeddings with attention
        additional_projection: bool = True,  # add a second projection layer
    ) -> None:
        """Build the tokenizer, encoder and optional refinement layers.

        Args:
            model_name (str): Name or path of the pre-trained ALIGN checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes the whole encoder).
            dropout (float): Attention dropout applied inside the encoder.
            target_hidden_size (int): Output embedding dimensionality.
            training_use_cache (bool): Whether to use cache during training.
            use_attention_layer (bool): Whether to add the attention layer.
            additional_projection (bool): Whether to add the extra projection.
        """
        super().__init__()

        if not model_name:
            raise ValueError("model_name must be a non-empty string.")

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Pre-trained ALIGN tokenizer / text encoder.
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.config = AlignTextConfig.from_pretrained(model_name,
                                                      attention_dropout=dropout)
        self.model = AlignTextModel.from_pretrained(model_name, config=self.config)

        # Map the encoder hidden size onto the requested output size.
        self.projection = nn.Linear(self.config.hidden_size, target_hidden_size)
        if additional_projection:
            # Registered only when requested; forward() probes it via hasattr().
            self.additional_projection = nn.Linear(target_hidden_size,
                                                   target_hidden_size)

        self.use_attention_layer = use_attention_layer
        if self.use_attention_layer:
            self.attention_layer = nn.MultiheadAttention(
                embed_dim=target_hidden_size, num_heads=8)

        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]) -> torch.Tensor:
        """Flatten and tokenize a batch of caption lists.

        Args:
            texts (List[List[str]]): Batch of caption lists.

        Returns:
            Tokenized inputs placed on the encoder's device.
        """
        if not all(isinstance(sublist, list) for sublist in texts):
            raise ValueError("Input texts must be a list of lists of strings.")

        flattened = list(itertools.chain.from_iterable(texts))
        encoded = self.tokenizer(
            text=flattened,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,
        )
        return encoded.to(self.model.device)

    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """Encode captions and return per-caption embeddings.

        Args:
            text (List[List[str]]): Batch of caption lists; every inner list
                must have the same length.

        Returns:
            Tensor of shape ``[batch, num_captions, target_hidden_size]``.
        """
        if not text or not all(isinstance(t, list) for t in text):
            raise ValueError("Input text must be a non-empty list of lists.")

        counts = [len(t) for t in text]
        if max(counts) != min(counts):
            raise ValueError("All batch sequences must have the same length.")

        encoded = self.forward_tokenizer(text)
        hidden = self.model(**encoded).last_hidden_state

        # L2-normalized CLS token per caption, then projection(s).
        embeddings = F.normalize(hidden[:, 0, :], p=2, dim=-1)
        embeddings = self.projection(embeddings)
        if hasattr(self, 'additional_projection'):
            embeddings = self.additional_projection(embeddings)

        if self.use_attention_layer:
            # NOTE(review): the added leading dim makes this a length-1
            # sequence in nn.MultiheadAttention's default seq-first layout,
            # so attention weights are trivially 1 and the layer acts as a
            # learned linear map per caption — confirm this is intended.
            stacked = embeddings.unsqueeze(0)
            refined, _ = self.attention_layer(stacked, stacked, stacked)
            embeddings = refined.squeeze(0)

        # Restore the [batch, num_captions, dim] layout.
        return embeddings.view(len(text), len(text[0]), embeddings.size(-1))

    def _freeze_modules(self):
        """Freeze the configured sub-modules (or the whole encoder)."""
        if not self.frozen_modules:
            return

        if "all" in self.frozen_modules:
            self.model.eval()
            for param in self.model.parameters():
                param.requires_grad = False
            return

        # Freeze every sub-module whose name matches a configured prefix.
        for name, module in self.model.named_modules():
            if any(name.startswith(prefix) for prefix in self.frozen_modules):
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode: bool = True):
        """Switch train/eval mode while keeping frozen modules frozen.

        Args:
            mode (bool): Whether to set the model to training mode.
        """
        super().train(mode)
        self._freeze_modules()

    def configure_optimizer_and_scheduler(self, train_dataloader, num_epochs: int, lr: float = 5e-5):
        """Create an AdamW optimizer and a linear warmup schedule.

        Args:
            train_dataloader (DataLoader): Training dataloader (step counting).
            num_epochs (int): Number of training epochs.
            lr (float): Learning rate for the optimizer.

        Returns:
            Tuple of (optimizer, scheduler).
        """
        optimizer = AdamW(self.parameters(), lr=lr)
        steps = num_epochs * len(train_dataloader)
        scheduler = get_linear_schedule_with_warmup(
            optimizer, num_warmup_steps=0, num_training_steps=steps
        )
        return optimizer, scheduler



# ++++++++++++++++++++= HuggingALIGNLanguageBackboneWithCoCoOp  ++++++++++++++++++++=



@MODELS.register_module()
class HuggingALIGNLanguageBackboneWithCoCoOp(nn.Module):
    """ALIGN text backbone with CoCoOp-style conditional soft prompts.

    When ``image_features`` are supplied, a small MLP generates per-sample
    prompt embeddings conditioned on fused text/image features; otherwise
    the static learnable soft prompts are used.  The first prompt token is
    concatenated with the projected CLS embedding.

    Fix applied: stray debugging ``print()`` calls in ``forward`` were
    removed (they spammed stdout on every training step).
    """

    def __init__(
        self,
        model_name: str,
        frozen_modules: Sequence[str] = (),
        dropout: float = 0.0,
        target_hidden_size: int = 512,
        num_prompts: int = 5,  # Number of learnable soft prompt tokens
        prompt_dim: int = 256,  # Dimension of intermediate prompt embeddings
        training_use_cache: bool = False,
        init_cfg: dict = None,
    ) -> None:
        """Build the ALIGN encoder, projection and prompt generator.

        Args:
            model_name (str): Name or path of the pre-trained ALIGN checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes the whole encoder).
            dropout (float): Attention dropout applied inside the encoder.
            target_hidden_size (int): Output embedding dimensionality.
            num_prompts (int): Number of learnable soft prompt tokens.
            prompt_dim (int): Hidden width of the prompt generator MLP.
            training_use_cache (bool): Whether to use cache during training.
            init_cfg (dict): Accepted for config compatibility; unused here.
        """
        super().__init__()

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize ALIGN tokenizer and model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.config = AlignTextConfig.from_pretrained(
            model_name, attention_dropout=dropout
        )
        self.model = AlignTextModel.from_pretrained(model_name, config=self.config)

        # Projection layer to adjust hidden size
        self.projection = nn.Linear(self.config.hidden_size, target_hidden_size)

        # Conditional prompt generator (CoCoOp): fused text+image -> prompts.
        self.prompt_generator = nn.Sequential(
            nn.Linear(target_hidden_size * 2, prompt_dim),
            nn.ReLU(),
            nn.Linear(prompt_dim, num_prompts * target_hidden_size),
        )

        # Trainable soft prompt embeddings (fallback when no image features).
        self.soft_prompt_embeddings = nn.Parameter(
            torch.randn(num_prompts, target_hidden_size)
        )

        # Lazily created in forward() once the image-feature width is known.
        # NOTE(review): parameters created after optimizer construction are
        # not trained unless the optimizer is rebuilt — confirm training setup.
        self.dynamic_projection = None

        # Ensure frozen modules remain frozen
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]) -> Tensor:
        """Flatten and tokenize a batch of caption lists with the ALIGN tokenizer.

        Args:
            texts (List[List[str]]): Batch of caption lists.

        Returns:
            Tokenized inputs placed on the encoder's device.
        """
        flat_text = list(itertools.chain.from_iterable(texts))
        tokens = self.tokenizer(
            text=flat_text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,
        )
        return tokens.to(self.model.device)

    def forward(self, text: List[List[str]], image_features: Tensor = None) -> Tensor:
        """Encode text and combine it with conditional soft prompts.

        Args:
            text (List[List[str]]): Batch of text inputs.
            image_features (Tensor, optional): Visual features used to
                generate conditional prompts; any trailing dims are flattened.

        Returns:
            Tensor: ``[batch, target_hidden_size * 2]`` — first prompt token
            concatenated with the projected CLS embedding.
        """
        if not text or not all(isinstance(t, list) for t in text):
            raise ValueError("Input text must be a non-empty list of lists.")

        tokens = self.forward_tokenizer(text)
        outputs = self.model(**tokens)

        # CLS token embedding, L2-normalized, then projected.
        hidden_states = outputs.last_hidden_state
        cls_embeddings = F.normalize(hidden_states[:, 0, :], p=2, dim=-1)
        projected_embeddings = self.projection(cls_embeddings)

        if image_features is not None:
            # Flatten image features to [batch_size, feature_dim].
            image_features = image_features.view(image_features.size(0), -1)

            # (Re)build the adapter when the incoming feature width changes.
            if (self.dynamic_projection is None
                    or self.dynamic_projection.in_features != image_features.size(-1)):
                self.dynamic_projection = nn.Linear(
                    image_features.size(-1), projected_embeddings.size(-1)
                ).to(projected_embeddings.device)

            # Project image features to match the text feature size.
            image_features = self.dynamic_projection(image_features)

            # Fuse text + image features and generate conditional prompts.
            fused_features = torch.cat([projected_embeddings, image_features], dim=-1)
            conditional_prompts = self.prompt_generator(fused_features)
            conditional_prompts = conditional_prompts.view(
                -1, self.soft_prompt_embeddings.size(0), projected_embeddings.size(-1)
            )
        else:
            # Fall back to the static learnable soft prompts.
            conditional_prompts = self.soft_prompt_embeddings.unsqueeze(0).repeat(
                projected_embeddings.size(0), 1, 1
            )

        # Combine the first prompt token with the CLS embedding.
        combined_embeddings = torch.cat(
            [conditional_prompts[:, 0, :], projected_embeddings], dim=-1
        )
        return combined_embeddings

    def _freeze_modules(self):
        """Freeze the configured sub-modules (or the whole encoder)."""
        if len(self.frozen_modules) == 0:
            return

        if "all" in self.frozen_modules:
            # model.eval() recurses into every sub-module.
            self.model.eval()
            for param in self.model.parameters():
                param.requires_grad = False
            return

        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        """Switch train/eval mode while keeping frozen modules frozen."""
        super().train(mode)
        self._freeze_modules()




# ++++++++++++++++++++= Enhance ALIGN ++++++++++++++++++++=











@MODELS.register_module()
class HuggingBLIP2LanguageBackbone(nn.Module):
    """Text backbone that embeds captions with a pre-trained BLIP-2 model.

    Each caption is encoded, its CLS embedding L2-normalized, projected to
    ``target_hidden_size``, and the result reshaped back to
    ``[batch, num_captions, target_hidden_size]``.
    """

    def __init__(
        self,
        model_name: str,
        frozen_modules: Sequence[str] = (),
        dropout: float = 0.0,
        target_hidden_size: int = 512,
        training_use_cache: bool = False,
    ) -> None:
        """Load the BLIP-2 processor/model and build the projection head.

        Args:
            model_name (str): Name or path of the pre-trained BLIP-2 checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes the whole model).
            dropout (float): Accepted for signature compatibility; not read
                anywhere in this class.
            target_hidden_size (int): Output embedding dimensionality.
            training_use_cache (bool): Whether to use cache during training.
        """
        super().__init__()
        if not model_name:
            raise ValueError("model_name must be a non-empty string.")

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # BLIP-2 processor (tokenization) and the full model.
        self.processor = Blip2Processor.from_pretrained(model_name)
        self.model = Blip2Model.from_pretrained(model_name)

        # The text branch's hidden width drives the projection head.
        self.hidden_size = self.model.config.text_config.hidden_size
        self.projection = nn.Linear(self.hidden_size, target_hidden_size)

        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]) -> dict:
        """Flatten and tokenize a batch of caption lists.

        Args:
            texts (List[List[str]]): Batch of caption lists.

        Returns:
            dict: Tokenized inputs placed on the model's device.
        """
        if not all(isinstance(sublist, list) for sublist in texts):
            raise ValueError("Input texts must be a list of lists of strings.")

        flattened = list(itertools.chain.from_iterable(texts))
        encoded = self.processor(
            text=flattened,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,
        )
        return encoded.to(self.model.device)

    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """Encode captions into ``[batch, num_captions, target_hidden_size]``.

        Args:
            text (List[List[str]]): Batch of caption lists; every inner list
                must have the same length.

        Returns:
            torch.Tensor: Projected, L2-normalized CLS embeddings.
        """
        if not text or not all(isinstance(t, list) for t in text):
            raise ValueError("Input text must be a non-empty list of lists.")

        counts = [len(t) for t in text]
        if max(counts) != min(counts):
            raise ValueError("All batch sequences must have the same length.")

        encoded = self.forward_tokenizer(text)

        # NOTE(review): assumes the loaded BLIP-2 model exposes a
        # `text_encoder` sub-module — confirm against the installed
        # transformers version.
        hidden = self.model.text_encoder(**encoded).last_hidden_state

        embeddings = F.normalize(hidden[:, 0, :], p=2, dim=-1)
        embeddings = self.projection(embeddings)

        return embeddings.view(len(text), len(text[0]), embeddings.size(-1))

    def _freeze_modules(self):
        """Freeze the configured sub-modules (or the whole model)."""
        if not self.frozen_modules:
            return

        if "all" in self.frozen_modules:
            self.model.eval()
            for param in self.model.parameters():
                param.requires_grad = False
            return

        # Freeze every sub-module whose name matches a configured prefix.
        for name, module in self.model.named_modules():
            if any(name.startswith(prefix) for prefix in self.frozen_modules):
                module.eval()
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode: bool = True):
        """Switch train/eval mode while keeping frozen modules frozen.

        Args:
            mode (bool): Whether to set the model to training mode.
        """
        super().train(mode)
        self._freeze_modules()


@MODELS.register_module()
class HuggingALIGNLanguageBackboneWithPrompts(BaseModule):
    """ALIGN text backbone that prepends trainable soft prompt tokens.

    Soft prompt embeddings are concatenated in front of the token
    embeddings before the encoder runs; the resulting CLS embedding is
    projected down to ``target_hidden_size``.

    Fix applied: stray debugging ``print()`` calls in ``__init__`` and
    ``forward`` were removed (they spammed stdout on every step).
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 target_hidden_size: int = 512,  # Adjust for downstream tasks
                 prompt_length: int = 5,  # Number of soft prompt tokens
                 training_use_cache: bool = False) -> None:
        """Build the ALIGN encoder, soft prompts and projection layers.

        Args:
            model_name (str): Name or path of the pre-trained ALIGN checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes the whole encoder).
            dropout (float): Used both as the encoder's attention dropout and
                as the dropout applied to the soft prompts.
            target_hidden_size (int): Output embedding dimensionality.
            prompt_length (int): Number of soft prompt tokens to prepend.
            training_use_cache (bool): Whether to use cache during training.
        """
        super().__init__()

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize ALIGN Tokenizer and Model
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AlignTextModel.from_pretrained(
            model_name,
            config=AlignTextConfig.from_pretrained(model_name, attention_dropout=dropout)
        )

        # Hidden size comes from the loaded checkpoint.
        self.hidden_size = self.model.config.hidden_size

        # Trainable soft prompts (randomly initialized).
        self.soft_prompt_embeddings = nn.Parameter(
            torch.randn(prompt_length, self.hidden_size)
        )

        # Projection layer for dimensionality adjustment
        self.projection = nn.Linear(self.hidden_size, target_hidden_size)

        # Optional fusion layer for downstream tasks.  Not used by forward()
        # today; kept registered so the state dict stays stable.
        self.fusion_projection = nn.Linear(target_hidden_size, target_hidden_size)

        # Dropout for regularization
        self.dropout = nn.Dropout(p=dropout)

        # Apply module freezing if needed
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]) -> Dict[str, Tensor]:
        """Flatten and tokenize a batch of caption lists with the ALIGN tokenizer."""
        flat_text = list(itertools.chain(*texts))
        tokens = self.tokenizer(
            text=flat_text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512
        )
        return tokens.to(self.model.device)

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode text with soft prompts prepended to the input embeddings.

        Args:
            text (List[List[str]]): Batch of caption lists.

        Returns:
            Tensor: ``[num_captions_total, target_hidden_size]`` projected
            CLS embeddings (one row per flattened caption).
        """
        tokens = self.forward_tokenizer(text)

        # Token embeddings: [batch_size, seq_len, hidden_size].
        input_embeddings = self.model.get_input_embeddings()(tokens.input_ids)
        batch_size = input_embeddings.size(0)

        # Prepend (dropout-regularized) soft prompts to every sample.
        soft_prompts = self.dropout(
            self.soft_prompt_embeddings.unsqueeze(0).expand(batch_size, -1, -1)
        )
        input_embeddings = torch.cat([soft_prompts, input_embeddings], dim=1)

        # Extend the attention mask so the prompts are always attended to.
        attention_mask = tokens.attention_mask
        prompt_mask = torch.ones(
            batch_size, self.soft_prompt_embeddings.size(0),
            device=attention_mask.device
        )
        attention_mask = torch.cat([prompt_mask, attention_mask], dim=1)

        # Forward pass through ALIGN with the augmented embeddings.
        outputs = self.model(inputs_embeds=input_embeddings,
                             attention_mask=attention_mask)
        hidden_states = outputs.last_hidden_state

        # CLS token embedding (first position), L2-normalized.
        cls_embeddings = F.normalize(hidden_states[:, 0, :], p=2, dim=-1)

        # Project to the target dimensionality.
        return self.projection(cls_embeddings)

    def _freeze_modules(self):
        """Freeze specific or all modules of the encoder."""
        if not self.frozen_modules:
            return

        if "all" in self.frozen_modules:
            for param in self.model.parameters():
                param.requires_grad = False
            return

        for name, module in self.model.named_modules():
            if any(name.startswith(fm) for fm in self.frozen_modules):
                for param in module.parameters():
                    param.requires_grad = False

    def train(self, mode=True):
        """Ensures frozen modules remain frozen during training."""
        super().train(mode)
        self._freeze_modules()

@MODELS.register_module()
class HuggingQwenLanguageBackbone(BaseModule):
    """Text backbone wrapping a pre-trained Qwen language model.

    Fix applied: ``_freeze_modules`` previously checked
    ``self.frozen_modules[0] == "all"``, which silently skipped full
    freezing when "all" was not the first entry; it now uses membership
    (``"all" in self.frozen_modules``), consistent with the other
    backbones in this file.
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False,
                 init_cfg: OptMultiConfig = None) -> None:
        """Load the Qwen tokenizer/model and apply module freezing.

        Args:
            model_name (str): Name or path of the pre-trained Qwen checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes the whole model).
            dropout (float): Accepted for signature compatibility; not read
                anywhere in this class.
            training_use_cache (bool): Whether to use cache during training.
            init_cfg (OptMultiConfig): mmengine init config.
        """
        super().__init__(init_cfg=init_cfg)

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize tokenizer and model specific to Qwen1.5-0.5B
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)
        self.model = AutoModel.from_pretrained(
            model_name,
            output_hidden_states=True,  # Ensure hidden states are available
            output_attentions=False,    # Disable attention outputs unless needed
        )

        self._freeze_modules()

    def forward_tokenizer(self, texts):
        """Flatten and tokenize a batch of caption lists.

        Args:
            texts (List[List[str]]): Batch of caption lists.

        Returns:
            Tokenized inputs placed on the model's device.
        """
        flat_text = list(itertools.chain(*texts))
        tokens = self.tokenizer(
            text=flat_text,
            return_tensors="pt",
            padding=True,
            truncation=True,
            max_length=512,  # Adjust as per memory constraints
        )
        return tokens.to(device=self.model.device)

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode captions into ``[batch, num_captions, hidden]`` embeddings.

        Args:
            text (List[List[str]]): Batch of caption lists; every inner list
                must have the same length.

        Returns:
            Tensor: L2-normalized first-token embeddings in batch layout.
        """
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), (
            "Number of sequences must be equal in each batch."
        )

        tokens = self.forward_tokenizer(text)

        # Forward pass; use the final layer's hidden states.
        outputs = self.model(**tokens)
        hidden_states = outputs.hidden_states[-1]

        # First-token embedding per caption, L2-normalized.
        cls_embeddings = hidden_states[:, 0, :]
        normalized_embeddings = F.normalize(cls_embeddings, p=2, dim=-1)

        # Reshape embeddings back to batch format
        return normalized_embeddings.view(
            -1, num_per_batch[0], normalized_embeddings.size(-1))

    def _freeze_modules(self):
        """Freeze specific or all modules of the model."""
        if len(self.frozen_modules) == 0:
            return

        # Membership test (not position test) so "all" works anywhere in
        # the sequence, matching the sibling backbones.
        if "all" in self.frozen_modules:
            self.model.eval()
            for _, param in self.model.named_parameters():
                param.requires_grad = False
            return

        # Freeze specific submodules
        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        """Ensures frozen modules remain frozen during training."""
        super().train(mode)
        self._freeze_modules()
    
@MODELS.register_module()
class EnhancedTextCLIPBackbone(BaseModule):
    """Dual text encoder fusing CLIP and RoBERTa sentence embeddings.

    Fix applied: the previous ``forward`` referenced ``self.model``,
    ``self.soft_prompt_embeddings`` and ``self.projection`` — attributes
    this class never defines — and ignored that ``forward_tokenizer``
    returns a *pair* of token dicts, so every call raised AttributeError.
    It now encodes with both models and combines the CLS embeddings via
    the ``fusion_layer`` built in ``__init__``.  A ``train`` override was
    also added so frozen modules stay frozen, matching the sibling
    backbones in this file.
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 enhanced_text_model_name: str = "D:\\YOLO\\YOLO-World\\roberta-base",
                 dropout: float = 0.0,
                 training_use_cache: bool = False,
                 init_cfg: OptMultiConfig = None) -> None:
        """Build the CLIP and RoBERTa encoders plus the fusion layer.

        Args:
            model_name (str): Name or path of the pre-trained CLIP checkpoint.
            frozen_modules (Sequence[str]): Module-name prefixes to freeze
                ("all" freezes both encoders).
            enhanced_text_model_name (str): Name or path of the RoBERTa
                checkpoint used as the enhanced text encoder.
            dropout (float): Attention dropout for the CLIP text encoder.
            training_use_cache (bool): Whether to use cache during training.
            init_cfg (OptMultiConfig): mmengine init config.
        """
        super().__init__(init_cfg=init_cfg)

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize CLIP Text Model
        self.clip_tokenizer = AutoTokenizer.from_pretrained(model_name)
        clip_config = CLIPTextConfig.from_pretrained(model_name, attention_dropout=dropout)
        self.clip_text_model = CLIPTextModel.from_pretrained(model_name, config=clip_config)

        # Initialize RoBERTa as Enhanced Text Encoder
        self.enhanced_tokenizer = AutoTokenizer.from_pretrained(enhanced_text_model_name)
        self.enhanced_text_model = RobertaModel.from_pretrained(enhanced_text_model_name)

        # Fusion Layer to combine CLIP and RoBERTa Embeddings
        self.fusion_layer = nn.Linear(
            self.clip_text_model.config.hidden_size + self.enhanced_text_model.config.hidden_size,
            self.clip_text_model.config.hidden_size
        )
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]):
        """Tokenize flattened captions for both encoders.

        Args:
            texts (List[List[str]]): Batch of caption lists.

        Returns:
            Tuple of (clip_tokens, enhanced_tokens), each on its model's device.
        """
        flat_text = list(itertools.chain(*texts))
        clip_tokens = self.clip_tokenizer(text=flat_text, return_tensors='pt', padding=True)
        enhanced_tokens = self.enhanced_tokenizer(text=flat_text, return_tensors='pt', padding=True)
        return clip_tokens.to(self.clip_text_model.device), enhanced_tokens.to(self.enhanced_text_model.device)

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode captions with both encoders and fuse the CLS embeddings.

        Args:
            text (List[List[str]]): Batch of caption lists; every inner list
                must have the same length.

        Returns:
            Tensor: ``[batch, num_captions, clip_hidden_size]`` fused,
            L2-normalized embeddings.
        """
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), (
            "Number of sequences must be equal in each batch."
        )

        clip_tokens, enhanced_tokens = self.forward_tokenizer(text)

        # CLS-position embedding from each encoder.
        clip_cls = self.clip_text_model(**clip_tokens).last_hidden_state[:, 0, :]
        enhanced_cls = self.enhanced_text_model(**enhanced_tokens).last_hidden_state[:, 0, :]

        # Concatenate and project back to the CLIP hidden size.
        fused = self.fusion_layer(torch.cat([clip_cls, enhanced_cls], dim=-1))
        fused = F.normalize(fused, p=2, dim=-1)

        # Reshape flattened captions back to [batch, num_captions, dim].
        return fused.view(-1, num_per_batch[0], fused.size(-1))

    def _freeze_modules(self):
        """Freeze the configured sub-modules in both encoders."""
        if len(self.frozen_modules) == 0:
            # No modules to freeze
            return

        # Freeze all modules if "all" is specified
        if self.frozen_modules[0] == "all":
            # Freeze CLIP text model
            self.clip_text_model.eval()
            for _, module in self.clip_text_model.named_modules():
                for param in module.parameters():
                    param.requires_grad = False

            # Freeze RoBERTa enhanced text model
            self.enhanced_text_model.eval()
            for _, module in self.enhanced_text_model.named_modules():
                for param in module.parameters():
                    param.requires_grad = False
            return

        # Freeze specified modules in each encoder independently.
        for encoder in (self.clip_text_model, self.enhanced_text_model):
            for name, module in encoder.named_modules():
                for frozen_name in self.frozen_modules:
                    if name.startswith(frozen_name):
                        module.eval()
                        for param in module.parameters():
                            param.requires_grad = False
                        break

    def train(self, mode: bool = True):
        """Ensures frozen modules remain frozen during training."""
        super().train(mode)
        self._freeze_modules()

@MODELS.register_module()
class EnhancedTextCLIPBackboneV2(BaseModule):
    """Text backbone fusing CLIP and BEiT CLS-token embeddings.

    Both encoders produce a CLS-token embedding which is L2-normalized,
    concatenated, and projected back to the CLIP hidden size by a linear
    fusion layer.

    Args:
        model_name (str): HuggingFace name/path of the CLIP text model.
        frozen_modules (Sequence[str]): Module-name prefixes to freeze;
            a sequence containing ``"all"`` freezes both encoders.
        enhanced_text_model_name (str): Name/path of the BEiT model.
            NOTE(review): defaults to a hard-coded local Windows path —
            override it in configs for portability.
        dropout (float): Attention dropout applied to the CLIP text config.
        training_use_cache (bool): Stored for API parity; not used here.
        init_cfg (OptMultiConfig): mmengine initialization config.
    """

    def __init__(self,
                model_name: str,
                frozen_modules: Sequence[str] = (),
                enhanced_text_model_name: str = "D:\\YOLO\\YOLO-World\\beit-large-patch16-224",
                dropout: float = 0.0,
                training_use_cache: bool = False,
                init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg=init_cfg)

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize CLIP text model with the requested attention dropout.
        self.clip_tokenizer = AutoTokenizer.from_pretrained(model_name)
        clip_config = CLIPTextConfig.from_pretrained(model_name, attention_dropout=dropout)
        self.clip_text_model = CLIPTextModel.from_pretrained(model_name, config=clip_config)

        # Initialize BEiT as the "enhanced" text encoder.
        # NOTE(review): hard-coded sentencepiece path — make configurable.
        self.enhanced_tokenizer = XLMRobertaTokenizer("D:\\YOLO\\YOLO-World\\beit-large-patch16-224\\beit3.spm")
        self.enhanced_text_model = BeitModel.from_pretrained(enhanced_text_model_name)

        # Fusion layer maps [clip_dim + beit_dim] back down to clip_dim.
        self.fusion_layer = nn.Linear(
            self.clip_text_model.config.hidden_size + self.enhanced_text_model.config.hidden_size,
            self.clip_text_model.config.hidden_size
        )
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]):
        """Tokenize the flattened texts for both encoders and move the
        token tensors onto each model's device."""
        flat_text = list(itertools.chain(*texts))
        clip_tokens = self.clip_tokenizer(text=flat_text, return_tensors='pt', padding=True)
        enhanced_tokens = self.enhanced_tokenizer(text=flat_text, return_tensors='pt', padding=True)
        return clip_tokens.to(self.clip_text_model.device), enhanced_tokens.to(self.enhanced_text_model.device)

    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """Encode a batch of per-image text lists into fused embeddings.

        Returns:
            Tensor of shape ``(batch, num_texts, clip_hidden_size)``.
        """
        num_per_batch = [len(t) for t in text]
        # Reshape below requires every image to carry the same text count.
        assert max(num_per_batch) == min(num_per_batch), "Batch sequence lengths must match"

        clip_tokens, enhanced_tokens = self.forward_tokenizer(text)

        # CLIP CLS-token embedding, L2-normalized.
        clip_outputs = self.clip_text_model(**clip_tokens)
        clip_embeds = clip_outputs.last_hidden_state[:, 0, :]
        clip_embeds = clip_embeds / clip_embeds.norm(p=2, dim=-1, keepdim=True)

        # BEiT CLS-token embedding, L2-normalized.
        enhanced_outputs = self.enhanced_text_model(**enhanced_tokens)
        enhanced_embeds = enhanced_outputs.last_hidden_state[:, 0, :]
        enhanced_embeds = enhanced_embeds / enhanced_embeds.norm(p=2, dim=-1, keepdim=True)

        # Concatenate and fuse; restore the per-image grouping.
        combined_embeds = torch.cat([clip_embeds, enhanced_embeds], dim=-1)
        fused_embeds = self.fusion_layer(combined_embeds)
        fused_embeds = fused_embeds.reshape(-1, num_per_batch[0], fused_embeds.shape[-1])

        return fused_embeds

    def _freeze_modules(self):
        """Freeze parameters (and switch to eval mode) per ``frozen_modules``.

        Fix: previously frozen modules were left in train mode, so dropout
        and similar stochastic layers still behaved as training even though
        their weights were frozen.  The sibling backbones in this file call
        ``.eval()`` on frozen modules; this now does the same.
        """
        if len(self.frozen_modules) == 0:
            # No modules to freeze
            return

        if "all" in self.frozen_modules:
            # Freeze all parameters of both encoders.
            self.clip_text_model.eval()
            for param in self.clip_text_model.parameters():
                param.requires_grad = False
            self.enhanced_text_model.eval()
            for param in self.enhanced_text_model.parameters():
                param.requires_grad = False
        else:
            # Freeze specific submodules by name prefix.
            for name, module in self.clip_text_model.named_modules():
                if any(name.startswith(fm) for fm in self.frozen_modules):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False
            for name, module in self.enhanced_text_model.named_modules():
                if any(name.startswith(fm) for fm in self.frozen_modules):
                    module.eval()
                    for param in module.parameters():
                        param.requires_grad = False

    def train(self, mode=True):
        """Re-apply freezing so frozen modules stay in eval mode."""
        super().train(mode)
        self._freeze_modules()

@MODELS.register_module()
class PseudoLanguageBackbone(BaseModule):
    """Pseudo Language Backbone.

    Looks up precomputed text embeddings from a ``{text: embed}`` dict
    loaded from disk instead of running a text encoder.

    Args:
        text_embed_path (str): path to the training text embedding file.
        test_embed_path (str, optional): path to the eval-time embedding
            file; falls back to ``text_embed_path`` when ``None``.
        init_cfg (OptMultiConfig): mmengine initialization config.
    """

    def __init__(self,
                 text_embed_path: str = "",
                 test_embed_path: str = None,
                 init_cfg: OptMultiConfig = None):
        super().__init__(init_cfg)
        # {text: embed}
        self.text_embed = torch.load(text_embed_path, map_location='cpu')
        if test_embed_path is None:
            self.test_embed = self.text_embed
        else:
            # Fix: load onto CPU like `text_embed` above — otherwise a
            # GPU-saved file fails to deserialize on CPU-only machines.
            self.test_embed = torch.load(test_embed_path, map_location='cpu')
        # Dummy buffer used only to discover the module's current device.
        self.register_buffer("buff", torch.zeros([
            1,
        ]))

    def forward_cache(self, text: List[List[str]]) -> Tensor:
        """Compute embeddings once and reuse them on subsequent calls.

        NOTE(review): the cache ignores the `text` argument after the first
        call — valid only when eval always uses the same class list.
        """
        if not hasattr(self, "cache"):
            self.cache = self.forward_text(text)
        return self.cache

    def forward(self, text: List[List[str]]) -> Tensor:
        # Training recomputes every step; eval uses the one-shot cache.
        if self.training:
            return self.forward_text(text)
        else:
            return self.forward_cache(text)

    def forward_text(self, text: List[List[str]]) -> Tensor:
        """Look up embeddings for a batch of per-image text lists.

        Returns:
            Tensor of shape ``(batch, num_texts, embed_dim)``.
        """
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), (
            'number of sequences not equal in batch')
        text = list(itertools.chain(*text))
        if self.training:
            text_embed_dict = self.text_embed
        else:
            text_embed_dict = self.test_embed
        # Keys are the portion of each prompt before the first "/".
        text_embeds = torch.stack(
            [text_embed_dict[x.split("/")[0]] for x in text])
        # requires no grad and force to float
        text_embeds = text_embeds.to(
            self.buff.device).requires_grad_(False).float()
        text_embeds = text_embeds.reshape(-1, num_per_batch[0],
                                          text_embeds.shape[-1])
        return text_embeds


# @MODELS.register_module()
# class MultiModalYOLOBackbone(BaseModule): 
#     def __init__(self,
#                  image_model: ConfigType,
#                  text_model: ConfigType,
#                  frozen_stages: int = -1,
#                  with_text_model: bool = True,
#                  init_cfg: OptMultiConfig = None) -> None:
#         super().__init__(init_cfg)
#         self.with_text_model = with_text_model

#         self.image_model = MODELS.build(image_model)
#         if self.with_text_model:
#             self.text_model = MODELS.build(text_model)
#         else:
#             self.text_model = None
#         self.frozen_stages = frozen_stages
#         self._freeze_stages()

#     def _freeze_stages(self):
#         """Freeze the specified stages of the image model."""
#         if self.frozen_stages >= 0:
#             for i in range(self.frozen_stages + 1):
#                 m = getattr(self.image_model, self.image_model.layers[i])
#                 m.eval()
#                 for param in m.parameters():
#                     param.requires_grad = False

#     def train(self, mode: bool = True):
#         """Convert the model into training mode while keeping normalization layers frozen."""
#         super().train(mode)
#         self._freeze_stages()

    # def forward(self, image: Tensor,
    #             text: List[List[str]]) -> Tuple[Tuple[Tensor], Tensor]:
    #     img_feats = self.image_model(image)
    #     # print("img_feats >>>>> ", img_feats)
        
    #     global global_image_model
    #     global_image_model = img_feats 
    #     if self.with_text_model:
    #         # print("img_feats shape >>>>> ", img_feats.shape)
    #         # print("img_feats size >>>>> ", img_feats.size())

    #         # print(" text shape >>>>> ", text.shape)
    #         # print(" text size >>>>> ", text.size())

    #         txt_feats = self.text_model(text, img_feats)
    #         # txt_feats = self.text_model(text)

    #         # print("img_feats size after >>>>> ", img_feats[0].shape)
    #         # print("img_feats size after >>>>> ", img_feats[0].size())

    #         # print(" txt_feats shape after >>>>> ", txt_feats.shape)
    #         # print(" txt_feats size after >>>>> ", txt_feats.size())

    #         return img_feats, txt_feats
    #     else:
    #         return img_feats, txt_feats


    # def forward(self, image: Tensor,
    #     text: List[List[str]]) -> Tuple[Tuple[Tensor], Tensor]:
    #     img_feats = self.image_model(image)
    #     if self.with_text_model:
    #         txt_feats = self.text_model(text, img_feats)
    #         return img_feats, txt_feats
    #     else:
    #         return img_feats, None
        
    # def forward_text(self, text: List[List[str]]) -> Tensor:
    #     assert self.with_text_model, "forward_text() requires a text model"
    #     txt_feats = self.text_model(text)
    #     return txt_feats

    # def forward_image(self, image: Tensor) -> Tuple[Tensor]:
    #     return self.image_model(image)
            
    
# Original Code  
@MODELS.register_module()
class MultiModalYOLOBackbone(BaseModule):
    """Image + text backbone wrapper for YOLO-World style models.

    Args:
        image_model (ConfigType): config of the image backbone to build.
        text_model (ConfigType): config of the text backbone to build.
        frozen_stages (int): freeze image-model stages ``0..frozen_stages``;
            ``-1`` freezes nothing.
        with_text_model (bool): build and run the text branch when ``True``.
        init_cfg (OptMultiConfig): mmengine initialization config.
    """

    def __init__(self,
                 image_model: ConfigType,
                 text_model: ConfigType,
                 frozen_stages: int = -1,
                 with_text_model: bool = True,
                 init_cfg: OptMultiConfig = None) -> None:
        super().__init__(init_cfg)
        self.with_text_model = with_text_model
        self.image_model = MODELS.build(image_model)
        if self.with_text_model:
            self.text_model = MODELS.build(text_model)
        else:
            self.text_model = None
        self.frozen_stages = frozen_stages
        self._freeze_stages()

    def _freeze_stages(self):
        """Freeze the parameters of the specified stage so that they are no
        longer updated."""
        if self.frozen_stages >= 0:
            for i in range(self.frozen_stages + 1):
                # `layers` holds attribute names of the stage modules.
                m = getattr(self.image_model, self.image_model.layers[i])
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def train(self, mode: bool = True):
        """Convert the model into training mode while keeping normalization
        layers of the frozen stages frozen."""
        # Fix: removed a leftover debug print that fired on every
        # train/eval toggle.
        super().train(mode)
        self._freeze_stages()

    def forward(self, image: Tensor, text: List[List[str]]) -> Tuple[Tuple[Tensor], Tensor]:
        """Run the image (and optionally text) branch.

        Side effect: publishes the image features through the module-level
        ``global_image_model`` variable, which other code in this project
        reads.
        """
        img_feats = self.image_model(image)

        # Intentional module-level side channel; kept for consumers that
        # read `global_image_model` elsewhere in the project.
        global global_image_model
        global_image_model = img_feats

        if self.with_text_model:
            txt_feats = self.text_model(text)
            return img_feats, txt_feats
        else:
            return img_feats, None

    def forward_text(self, text: List[List[str]]) -> Tensor:
        assert self.with_text_model, "forward_text() requires a text model"
        txt_feats = self.text_model(text)
        return txt_feats

    def forward_image(self, image: Tensor) -> Tuple[Tensor]:
        return self.image_model(image)

## SBERT MODEL TEST IN PROGRESS
@MODELS.register_module()
class HuggingSBERTLanguageBackbone(nn.Module):
    """SBERT-based text backbone (experimental).

    Mean-pools token embeddings, normalizes, projects to 256 dims, then
    pads/truncates the flattened batch to a fixed 512 rows and transposes
    for the downstream consumer.

    Args:
        model_name (str): HuggingFace name/path of the SBERT model.
        frozen_modules (Sequence[str]): module-name prefixes to freeze;
            ``("all",)`` freezes the whole encoder.
        dropout (float): dropout probabilities pushed into the model config
            when the config exposes them.
        training_use_cache (bool): stored for API parity; not used here.
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False) -> None:
        super().__init__()
        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache

        # Initialize the tokenizer
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

        # Load the SBERT model
        self.model = AutoModel.from_pretrained(model_name)

        # Set dropout if supported by the model's configuration
        if hasattr(self.model.config, 'attention_probs_dropout_prob'):
            self.model.config.attention_probs_dropout_prob = dropout
        if hasattr(self.model.config, 'hidden_dropout_prob'):
            self.model.config.hidden_dropout_prob = dropout

        # Projection to the embedding size the downstream head expects.
        self.hidden_size = self.model.config.hidden_size
        self.fusion_layer = nn.Linear(self.hidden_size, 256)

        # Freeze specified modules if necessary
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]):
        """Tokenize the flattened texts and move them to the model device."""
        flat_text = list(itertools.chain(*texts))
        tokens = self.tokenizer(
            text=flat_text,
            return_tensors='pt',
            padding=True,
            truncation=True,
            max_length=512  # Ensure compatibility with model constraints
        )
        return tokens.to(self.model.device)

    def mean_pooling(self, model_output, attention_mask):
        """Mean pooling - take attention mask into account for correct averaging."""
        token_embeddings = model_output[0]  # all token embeddings
        input_mask_expanded = attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()
        return torch.sum(token_embeddings * input_mask_expanded, dim=1) / torch.clamp(input_mask_expanded.sum(dim=1), min=1e-9)

    def forward(self, text: List[List[str]]) -> torch.Tensor:
        """Encode the text batch into a ``[256, 512]`` feature tensor.

        Fix: removed the eight debug ``print`` calls that spammed stdout on
        every forward pass.
        """
        # Ensure equal sequence lengths in the batch
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), "Number of sequences per batch must be equal"

        tokens = self.forward_tokenizer(text)

        # NOTE(review): no_grad means the SBERT weights never receive
        # gradients even when not frozen — confirm this is intended.
        with torch.no_grad():
            model_output = self.model(**tokens)

        # Mean-pool then L2-normalize sentence embeddings.
        sentence_embeddings = self.mean_pooling(model_output, tokens['attention_mask'])
        sentence_embeddings = F.normalize(sentence_embeddings, p=2, dim=1)

        # Project to 256 dims.
        fused_embeddings = self.fusion_layer(sentence_embeddings)

        # Group per image, then flatten back to [total_sequences, 256].
        reshaped_feats = fused_embeddings.view(-1, num_per_batch[0], fused_embeddings.size(-1))
        flat_feats = reshaped_feats.view(-1, fused_embeddings.size(-1))

        # NOTE(review): hard pad/truncate to 512 rows silently drops or
        # zero-fills sequences — revisit once the downstream contract is
        # settled.
        required_size = 512
        if flat_feats.size(0) > required_size:
            flat_feats = flat_feats[:required_size]
        elif flat_feats.size(0) < required_size:
            padding = torch.zeros((required_size - flat_feats.size(0), flat_feats.size(1)),
                                device=flat_feats.device)
            flat_feats = torch.cat([flat_feats, padding], dim=0)

        # Downstream expects [256, 512].
        flat_feats = flat_feats.T
        return flat_feats

    def _freeze_modules(self):
        """Freezes specified modules of the model."""
        if len(self.frozen_modules) == 0:
            return

        if self.frozen_modules[0] == "all":
            for param in self.model.parameters():
                param.requires_grad = False
            return

        # Freeze specific modules
        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        """Ensures frozen modules remain frozen during training."""
        super().train(mode)
        self._freeze_modules()

## BEit3 MODEL TEST IN PROGRESS
@MODELS.register_module()
class HuggingBEiT3LanguageBackbone(nn.Module):
    """BEiT-3 text backbone (experimental).

    Encodes text with a masked-LM checkpoint, mean-pools the final hidden
    states and L2-normalizes them.

    Args:
        model_name (str): HuggingFace name/path of the model.
        frozen_modules (Sequence[str]): module-name prefixes to freeze;
            ``("all",)`` freezes everything.
        dropout (float): dropout pushed into the config when supported.
        training_use_cache (bool): stored for API parity; only used by
            ``forward_tokenizer``.
    """

    def __init__(self,
                 model_name: str = 'microsoft/beit-large-patch16-224',
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False) -> None:
        super().__init__()
        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache
        self.tokenizer = AutoTokenizer.from_pretrained(model_name)

        # Load BEiT-3 model in "text-only" mode
        self.model = AutoModelForMaskedLM.from_pretrained(model_name)

        # Apply dropout settings when the config exposes them.
        if hasattr(self.model.config, 'attention_probs_dropout_prob'):
            self.model.config.attention_probs_dropout_prob = dropout
        if hasattr(self.model.config, 'hidden_dropout_prob'):
            self.model.config.hidden_dropout_prob = dropout

        # Freeze specified layers if needed
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[str]):
        """Tokenize and cache the inputs on the model device.

        NOTE(review): the cache is keyed on nothing — after the first call
        the same tokens are returned regardless of `texts`. Safe only when
        the prompt list never changes.
        """
        if not hasattr(self, 'text'):
            text = list(itertools.chain(*texts))
            text = self.tokenizer(text=text, return_tensors='pt', padding=True, truncation=True)
            self.text = text.to(device=self.model.device)
        return self.text

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode per-image text lists into normalized embeddings.

        Returns:
            Tensor of shape ``(batch, num_texts, hidden_size)``.
        """
        # Check consistency in batch dimensions
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), (
            'number of sequences not equal in batch')

        # Flatten, tokenize, and move to device
        text = list(itertools.chain(*text))
        text = self.tokenizer(text=text, return_tensors='pt', padding=True, truncation=True)
        text = text.to(device=self.model.device)

        # Fix: MaskedLM outputs expose `logits`, not `last_hidden_state` —
        # the old access raised AttributeError. Request hidden states and
        # take the final layer instead.
        outputs = self.model(**text, output_hidden_states=True)
        txt_feats = outputs.hidden_states[-1].mean(dim=1)  # mean pooling

        # Normalize embeddings for contrastive tasks
        txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)

        # Reshape to match input batch structure
        txt_feats = txt_feats.reshape(-1, num_per_batch[0], txt_feats.shape[-1])
        return txt_feats

    def _freeze_modules(self):
        """Freeze specified layers of the model."""
        if len(self.frozen_modules) == 0:
            return

        if self.frozen_modules[0] == "all":
            for param in self.model.parameters():
                param.requires_grad = False
            return

        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        super().train(mode)
        self._freeze_modules()

## HuggingBeitImageBackbone MODEL TEST IN PROGRESS
@MODELS.register_module()
class HuggingBeitImageBackbone(nn.Module):
    """BEiT-based text backbone (experimental, despite the class name).

    Tokenizes text with an XLM-Roberta sentencepiece model, runs it through
    a BEiT encoder, mean-pools and L2-normalizes the result.

    Args:
        model_name (str): HuggingFace name/path of the BEiT model.
        frozen_modules (Sequence[str]): module-name prefixes to freeze;
            ``("all",)`` freezes everything.
        dropout (float): dropout probabilities pushed into the BEiT config.
        training_use_cache (bool): tokenize once and reuse the tokens in
            ``forward_tokenizer`` when ``True``.
    """

    def __init__(self,
                 model_name: str,
                 frozen_modules: Sequence[str] = (),
                 dropout: float = 0.0,
                 training_use_cache: bool = False) -> None:
        super().__init__()

        self.frozen_modules = frozen_modules
        self.training_use_cache = training_use_cache
        # NOTE(review): hard-coded local Windows path — make configurable.
        self.tokenizer = XLMRobertaTokenizer("D:\\YOLO\\YOLO-World\\beit-large-patch16-224\\beit3.spm")

        # Set up the BEiT configuration with dropout adjustments
        beit_config = BeitConfig.from_pretrained(model_name)
        beit_config.hidden_dropout_prob = dropout
        self.model = BeitModel.from_pretrained(model_name, config=beit_config)

        # Apply dropout settings if necessary
        if hasattr(self.model.config, 'attention_probs_dropout_prob'):
            self.model.config.attention_probs_dropout_prob = dropout
        if hasattr(self.model.config, 'hidden_dropout_prob'):
            self.model.config.hidden_dropout_prob = dropout

        # Freeze specified layers if needed
        self._freeze_modules()

    def forward_tokenizer(self, texts: List[List[str]]):
        """Tokenize nested text lists; optionally cache the first result.

        Fix: the non-cached branch previously tokenized the nested
        ``List[List[str]]`` without flattening, unlike ``forward`` and the
        cached branch. Both paths now flatten first.
        """
        if self.training_use_cache:
            if not hasattr(self, 'text'):
                flat = list(itertools.chain(*texts))
                tokens = self.tokenizer(text=flat, return_tensors='pt', padding=True, truncation=True)
                self.text = tokens.to(device=self.model.device)
            return self.text
        flat = list(itertools.chain(*texts))
        return self.tokenizer(text=flat, return_tensors='pt', padding=True, truncation=True).to(self.model.device)

    def forward(self, text: List[List[str]]) -> Tensor:
        """Encode per-image text lists into normalized embeddings.

        Returns:
            Tensor of shape ``(batch, num_texts, hidden_size)``.
        """
        # Check consistency in batch dimensions
        num_per_batch = [len(t) for t in text]
        assert max(num_per_batch) == min(num_per_batch), 'Inconsistent batch sizes'

        # Flatten, tokenize, and move to device (single .to call; the old
        # duplicate transfer was redundant).
        text = list(itertools.chain(*text))
        text = self.tokenizer(text=text, return_tensors='pt', padding=True, truncation=True).to(self.model.device)

        # Forward pass through BEiT text encoder
        outputs = self.model(**text)

        # Mean-pool token embeddings into sentence-level features.
        txt_feats = outputs.last_hidden_state.mean(dim=1)

        # Normalize embeddings for contrastive tasks
        txt_feats = txt_feats / txt_feats.norm(p=2, dim=-1, keepdim=True)

        # Reshape to match input batch structure
        txt_feats = txt_feats.reshape(-1, num_per_batch[0], txt_feats.shape[-1])
        return txt_feats

    def _freeze_modules(self):
        """Freeze specified layers of the BEiT model."""
        if len(self.frozen_modules) == 0:
            return

        if self.frozen_modules[0] == "all":
            for param in self.model.parameters():
                param.requires_grad = False
            return

        for name, module in self.model.named_modules():
            for frozen_name in self.frozen_modules:
                if name.startswith(frozen_name):
                    for param in module.parameters():
                        param.requires_grad = False
                    break

    def train(self, mode=True):
        # Re-apply module freezing on every train/eval toggle.
        super().train(mode)
        self._freeze_modules()
