import torch
import os
from typing import Optional, Dict
from torch import nn, Tensor
from torch import functional as F
from transformers import CLIPProcessor, CLIPModel

from transformers import BertTokenizer
from CLIP.clip import clip

def freeze_optimizer_parameters_clip(module):
    """Freeze all parameters of ``module`` except the finetune layers.

    Any parameter whose qualified name contains one of the substrings in
    ``finetune_layers`` keeps ``requires_grad=True``; every other parameter
    is frozen in place (``requires_grad=False``).

    Args:
        module: a ``torch.nn.Module`` (typically a CLIP model) whose
            parameters are modified in place.
    """
    # Substrings identifying the layers that remain trainable.
    finetune_layers = ["classifier", "visual.layer4", "visual.attnpool", "transformer.resblocks.11", "ln_final", "text_projection", "logit_scale"]

    # BUG FIX: the original message said these layers were being *frozen*,
    # but they are exactly the layers kept trainable — everything else is
    # frozen below. Reworded to state what actually happens.
    print(f"Finetuning (keeping trainable) CLIP's parameters: {finetune_layers}")

    detected = set()
    for name, param in module.named_parameters():
        matched = [layer for layer in finetune_layers if layer in name]
        if matched:
            detected.update(matched)
        else:
            param.requires_grad = False
    # Entries that matched no parameter name usually indicate a typo or an
    # architecture mismatch — surface them for the user.
    print("Layers not detected: ", set(finetune_layers) - detected)


class CLIP(nn.Module):
    """CLIP-based image-text matching (ITM) model with an MLP head.

    Encodes text and image with a pretrained CLIP backbone, L2-normalizes
    both embeddings, concatenates them, and classifies the pair with a
    small MLP into 2 classes (e.g. matched / not matched).
    """

    def __init__(self):
        super().__init__()
        # NOTE(review): this HuggingFace CLIPModel is immediately overwritten
        # by clip.load() below, so only `self.processor` survives from here.
        # Loading it anyway is wasted time/memory — kept to preserve existing
        # side effects, but consider removing the model load.
        self.model = CLIPModel.from_pretrained("/root/autodl-tmp/clip")
        self.processor = CLIPProcessor.from_pretrained("/root/autodl-tmp/clip")

        # OpenAI CLIP backbone actually used for encoding (replaces the above).
        self.model, _ = clip.load("ViT-B/32", device="cuda", jit=False)

        # Create the ITM head: consumes [text_emb ; image_emb] (512 dims each).
        self.classifier = self.build_mlp(input_dim=512 * 2, output_dim=2)

        # Ablation switches: when set, the other modality is zeroed in forward().
        self.text_only = False
        self.image_only = False

    def build_mlp(self, input_dim, output_dim):
        """Return a 3-layer MLP head: (Linear -> LayerNorm -> GELU) x 2 -> Linear."""
        hidden = input_dim * 2
        return nn.Sequential(
            nn.Linear(input_dim, hidden),
            nn.LayerNorm(hidden),
            nn.GELU(),
            nn.Linear(hidden, hidden),
            nn.LayerNorm(hidden),
            nn.GELU(),
            nn.Linear(hidden, output_dim),
        )

    def cross_model_att(self, image_feat, text_feat):
        """Placeholder for cross-modal attention: currently the identity."""
        return image_feat, text_feat

    def forward(self, sample_list: Dict[str, Tensor]) -> Tensor:
        """Score an image/text pair with the ITM head.

        Args:
            sample_list: dict with keys ``"text"`` (token-id tensor) and
                ``"image_id"`` (preprocessed image tensor) — assumed shapes
                match the CLIP tokenizer/preprocessor; TODO confirm against
                the data loader.

        Returns:
            Logits of shape ``(batch, 2)`` from the classifier.
            (BUG FIX: the original annotation claimed ``Dict[str, Tensor]``
            and built an unused ``output_dict``; the method returns a Tensor.)
        """
        input_ids = sample_list['text'].cuda()
        image = sample_list["image_id"].cuda()

        # Zero out a modality if the model should be unimodal (ablations).
        if self.text_only:
            image = torch.zeros(image.shape, device="cuda").long()
        elif self.image_only:
            input_ids = torch.zeros(input_ids.shape, device="cuda").long()

        text_features = self.model.encode_text(input_ids).float()
        image_features = self.model.encode_image(image).float()

        # L2-normalize each modality before fusing.
        text_features = text_features / text_features.norm(dim=-1, keepdim=True)
        image_features = image_features / image_features.norm(dim=-1, keepdim=True)

        # BUG FIX: the original called `.to("cuda")` without assigning the
        # result — Tensor.to is out-of-place, so those calls were no-ops.
        # Both tensors are already on CUDA, so the calls are removed.

        features = torch.cat((text_features, image_features), dim=1)
        return self.classifier(features)
