import torch
import clip
from PIL import Image
import torch.nn.functional as F

from clip.model import build_model


class ClipEmbeding:
    """Thin wrapper around an OpenAI CLIP model for image/text similarity
    scoring and embedding extraction.

    NOTE(review): the class and method names keep the original spelling
    ("Embeding") because external callers reference them.
    """

    # Run on GPU when available, otherwise fall back to CPU.
    device = "cuda" if torch.cuda.is_available() else "cpu"

    def __init__(self, checkpoint_path: str):
        """Load a CLIP checkpoint and its matching image preprocessor.

        Args:
            checkpoint_path: Path to a CLIP checkpoint file (or a model
                name understood by ``clip.load``).
        """
        self.model, self.processor = clip.load(checkpoint_path, device=self.device)
        self.tokenizer = clip.tokenize

    def probs(self, image: "Image.Image") -> torch.Tensor:
        """Score *image* against three fixed prompts and print the result.

        Args:
            image: A PIL image to classify.

        Returns:
            The image-vs-text logits as a float tensor on the CPU, shape
            (1, 3).  (Returning them is new and backward-compatible; the
            original returned ``None``.)
        """
        process_image = self.processor(image).unsqueeze(0).to(self.device)
        text = self.tokenizer(["a diagram", "a tiger", "a cat"]).to(self.device)

        with torch.no_grad():
            # logits_per_image: similarity of this image to each text prompt
            # logits_per_text:  similarity of each text prompt to this image
            logits_per_image, logits_per_text = self.model(process_image, text)
            probs = logits_per_image.cpu().float()

        print("Label probs:", probs.softmax(dim=-1))
        # "a cat" is index 2.  cross_entropy expects raw logits, which is
        # exactly what `probs` holds (the softmax above is display-only).
        # torch.tensor (not torch.Tensor) is the documented factory and
        # already infers int64 for Python ints.
        label = torch.tensor([2])
        loss = F.cross_entropy(probs, label)
        print(loss)
        return probs

    def embeding(self, image: "Image.Image", text: str):
        """Encode an image and a text string into CLIP feature vectors.

        Args:
            image: A PIL image to embed.
            text: The text to embed.

        Returns:
            A ``(image_features, text_features)`` pair of tensors.
        """
        process_image = self.processor(image).unsqueeze(0).to(self.device)
        # Use a fresh name instead of rebinding the `text` parameter.
        tokens = self.tokenizer([text]).to(self.device)

        # Inference only: skip autograd graph construction, matching probs().
        with torch.no_grad():
            image_features = self.model.encode_image(process_image)
            text_features = self.model.encode_text(tokens)
        return image_features, text_features


if __name__ == "__main__":
    # Demo: load the ViT-L/14@336px checkpoint and score a sample image
    # against the built-in prompts.
    checkpoint_path = r"D:\Project\checkpoint\ViT_14_336px_clip\ViT-L-14-336px.pt"
    image_path = r'D:\Project\multimoding\data\clip_dataset\cat.jpg'

    embeder = ClipEmbeding(checkpoint_path)
    embeder.probs(Image.open(image_path))

