import requests
from PIL import Image
import matplotlib.pyplot as plt
import tensorflow as tf

from transformers import CLIPProcessor, TFCLIPModel

# Load the PubMedCLIP checkpoint and its paired processor from the Hugging Face Hub.
model = TFCLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

# Download a sample MedPix image and define the candidate labels.
url = "https://d168r5mdg5gtkq.cloudfront.net/medpix/img/full/synpic9078.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]

# Preprocess the image-text pairs and turn the image-text logits into label probabilities.
inputs = processor(text=text, images=image, return_tensors="tf", padding=True)
probs = tf.squeeze(tf.nn.softmax(model(**inputs).logits_per_image, axis=-1))

# Show the image with one "label: probability" line per candidate label.
plt.subplots()
plt.imshow(image)
plt.title("".join(f"{label}: {prob:.4%}\n" for label, prob in zip(text, probs.numpy())))
plt.axis("off")
plt.tight_layout()
plt.show()
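
# Optional: a minimal sketch for printing the prediction instead of plotting it
# (e.g. in a headless environment). It reuses `text` and `probs` from above and
# is an addition for illustration, not part of the original example.
for label, prob in zip(text, probs.numpy()):
    print(f"{label}: {prob:.4%}")
print("Predicted label:", text[int(tf.argmax(probs))])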
|
|