from transformers import CLIPVisionModel, CLIPModel
from torch import nn
import torch
import math
from transformers.models.clip.modeling_clip import _get_vector_norm, BaseModelOutputWithPooling
from typing import Optional, Union, Tuple

class CosineHead(nn.Module):
    """Cosine-similarity classification head.

    Stores one learnable prototype (row of ``weight``) per class and scores an
    input embedding by its cosine similarity to each prototype, multiplied by
    ``exp(scale)`` (CLIP-style logit scaling).

    ``scale`` is registered as a buffer, not a Parameter: it is not updated by
    the optimizer, but it is saved in / restored from ``state_dict`` and moved
    by ``.to()``/``.cuda()``. With its default of 0.0 the logit scale is
    exp(0) = 1, i.e. raw cosine similarities.

    Args:
        in_features: Dimensionality of the incoming image embeddings.
        out_features: Number of classes (one prototype per class).
    """

    def __init__(self, in_features, out_features):
        super().__init__()
        self.register_buffer('scale', torch.tensor(0.0))
        self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))
        # Same default init as nn.Linear's weight.
        nn.init.kaiming_uniform_(self.weight, a=math.sqrt(5))

    def forward(self, image_embeds):
        """Return logits of shape ``[B, out_features]`` for ``image_embeds`` of shape ``[B, C]``.

        Both operands are L2-normalized so the matmul yields cosine similarity.
        """
        # NOTE: previously used transformers' private helper ``_get_vector_norm``;
        # ``Tensor.norm(p=2, dim=-1, keepdim=True)`` is the same computation
        # (sqrt of sum of squares over the last dim, keepdim for broadcasting)
        # without depending on an underscore-prefixed internal API.
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)  # [B, C]
        text_embeds = self.weight / self.weight.norm(p=2, dim=-1, keepdim=True)  # [cls, C]

        # cosine similarity as logits
        logit_scale = self.scale.exp()
        logits = torch.matmul(image_embeds, text_embeds.t()) * logit_scale.to(text_embeds.device)

        return logits

class CLIPClassifier(CLIPVisionModel):
    """CLIP vision encoder with a cosine-similarity classification head.

    Replaces CLIP's contrastive text branch with a ``CosineHead``: the vision
    tower's pooled output is projected to ``projection_dim`` (mirroring
    ``CLIPModel.visual_projection``, bias-free) and scored against one
    learnable class prototype per class.

    Args:
        config: Vision config for the underlying CLIP vision tower.
        projection_dim: Size of the projected image embedding fed to the head.
        num_cls: Number of target classes.
    """

    def __init__(self, config, projection_dim=1024, num_cls=2):
        super().__init__(config)

        self.visual_projection = nn.Linear(config.hidden_size, projection_dim, bias=False)
        self.head = CosineHead(projection_dim, num_cls)

    def forward(
        self,
        pixel_values: Optional[torch.FloatTensor] = None,
        output_attentions: Optional[bool] = None,
        output_hidden_states: Optional[bool] = None,
        interpolate_pos_encoding: bool = False,
        return_dict: Optional[bool] = None,
    ) -> torch.Tensor:
        r"""Run the vision tower and classify its pooled output.

        Unlike ``CLIPVisionModel.forward``, this does **not** return a
        ``BaseModelOutputWithPooling`` — it returns the raw logits tensor of
        shape ``[batch, num_cls]`` produced by the cosine head.
        ``output_attentions``/``output_hidden_states`` still control the inner
        vision tower, but any extra outputs it produces are discarded here.

        Returns:
            `torch.Tensor` of shape ``[batch, num_cls]``: cosine-similarity
            classification logits.

        Example:

        ```python
        >>> from PIL import Image
        >>> import requests
        >>> from transformers import AutoProcessor

        >>> model = CLIPClassifier.from_pretrained("openai/clip-vit-base-patch32")
        >>> processor = AutoProcessor.from_pretrained("openai/clip-vit-base-patch32")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)

        >>> inputs = processor(images=image, return_tensors="pt")
        >>> logits = model(**inputs)  # shape [1, num_cls]
        ```"""
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        vision_outputs = self.vision_model(
            pixel_values=pixel_values,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            interpolate_pos_encoding=interpolate_pos_encoding,
        )

        # Index 1 is pooler_output for both the tuple and the ModelOutput
        # return forms, so this works regardless of return_dict.
        pooled_output = vision_outputs[1]  # pooled_output
        image_features = self.visual_projection(pooled_output)

        logits = self.head(image_features)
        return logits
