from PIL import Image
import requests
from transformers import ChineseCLIPProcessor, ChineseCLIPModel
from transformers import CLIPProcessor, CLIPModel
import os
import torch
import torch.nn as nn

# Root directory holding the locally downloaded model weights.
dataRoot = os.path.join(os.path.dirname(os.path.abspath(__file__)),'../DownloadRoot')

# Chinese-CLIP variant kept for reference:
# model = ChineseCLIPModel.from_pretrained(os.path.join(dataRoot,"models/OFA-Sys/chinese-clip-vit-huge-patch14"))
# processor = ChineseCLIPProcessor.from_pretrained(os.path.join(dataRoot,"models/OFA-Sys/chinese-clip-vit-huge-patch14"))
model = CLIPModel.from_pretrained(os.path.join(dataRoot,"models/openai/clip-vit-large-patch14"))
processor = CLIPProcessor.from_pretrained(os.path.join(dataRoot,"models/openai/clip-vit-large-patch14"))
model.eval()  # inference only — disable dropout etc.

# url = "https://clip-cn-beijing.oss-cn-beijing.aliyuncs.com/pokemon.jpeg"
# image = Image.open(requests.get(url, stream=True).raw)
image = Image.open('./Clip/im.jpg')
# Squirtle, Bulbasaur, Charmander, Pikachu — English renderings of the
# original Chinese labels ("杰尼龟", "妙蛙种子", "小火龙", "皮卡丘").
texts = ["Jenny Turtle", "Frog Seed", "Little Fire Dragon", "Pikachu"]

with torch.no_grad():  # no gradients needed for inference; saves memory and compute
    # compute image feature, L2-normalized so dot products are cosine similarities
    inputs = processor(images=image, return_tensors="pt")
    image_features = model.get_image_features(**inputs)
    image_features = image_features / image_features.norm(p=2, dim=-1, keepdim=True)  # normalize

    # compute text features, L2-normalized likewise
    inputs = processor(text=texts, padding=True, return_tensors="pt")
    text_features = model.get_text_features(**inputs)
    text_features = text_features / text_features.norm(p=2, dim=-1, keepdim=True)  # normalize

    # compute image-text similarity scores from the features already computed above.
    # (The original re-ran the full model here, needlessly duplicating both encoder
    # passes: CLIP's logits are just the scaled cosine similarities of the
    # normalized features, with the learned temperature exp(logit_scale).)
    logits_per_image = model.logit_scale.exp() * image_features @ text_features.t()
    probs = logits_per_image.softmax(dim=1)  # probs: [[1.1419e-02, 1.0478e-02, 5.2018e-04, 9.7758e-01]]

print(probs)