flaviagiammarino committed on
Commit
3870098
1 Parent(s): 3ee152c

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +11 -4
README.md CHANGED
@@ -31,6 +31,7 @@ which use ResNet-50, ResNet-50x4 and ViT32 as image encoders. This repository in
31
  ```python
32
  import requests
33
  from PIL import Image
 
34
 
35
  from transformers import CLIPProcessor, CLIPModel
36
 
@@ -39,11 +40,17 @@ processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base
39
 
40
  url = "https://huggingface.co/flaviagiammarino/pubmed-clip-vit-base-patch32/resolve/main/scripts/input.jpeg"
41
  image = Image.open(requests.get(url, stream=True).raw)
 
42
 
43
- inputs = processor(text=["Chest X-Ray", "Brain MRI", "Abdomen CT Scan"], images=image, return_tensors="pt", padding=True)
44
- outputs = model(**inputs)
45
- logits_per_image = outputs.logits_per_image # this is the image-text similarity score
46
- probs = logits_per_image.softmax(dim=1) # we can take the softmax to get the label probabilities
 
 
 
 
 
47
  ```
48
 
49
  ## Additional Information
 
31
  ```python
32
  import requests
33
  from PIL import Image
34
+ import matplotlib.pyplot as plt
35
 
36
  from transformers import CLIPProcessor, CLIPModel
37
 
 
40
 
41
  url = "https://huggingface.co/flaviagiammarino/pubmed-clip-vit-base-patch32/resolve/main/scripts/input.jpeg"
42
  image = Image.open(requests.get(url, stream=True).raw)
43
+ text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]
44
 
45
+ inputs = processor(text=text, images=image, return_tensors="pt", padding=True)
46
+ probs = model(**inputs).logits_per_image.softmax(dim=1).squeeze()
47
+
48
+ plt.subplots()
49
+ plt.imshow(image)
50
+ plt.title("".join([x[0] + ": " + x[1] + "\n" for x in zip(text, [format(prob, ".4%") for prob in probs])]))
51
+ plt.axis("off")
52
+ plt.tight_layout()
53
+ plt.show()
54
  ```
55
 
56
  ## Additional Information