flaviagiammarino committed
Commit: 2b98077
Parent(s): 525b1c2

Upload 2 files

Files changed (2)
  1. scripts/pt_example.py +4 -3
  2. scripts/tf_example.py +4 -3
scripts/pt_example.py CHANGED
@@ -7,15 +7,16 @@ from transformers import CLIPProcessor, CLIPModel
  model = CLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
  processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

- url = "https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcSjP8UWzpGqXKwlC1zPRhcJOXThfI4pXgg2Zhd1B-cstpnEDalY"
+ url = "https://d168r5mdg5gtkq.cloudfront.net/medpix/img/full/synpic18755.jpg"
  image = Image.open(requests.get(url, stream=True).raw)
- text = ["Chest X-Ray", "Brain MRI"]
+ text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]

  inputs = processor(text=text, images=image, return_tensors="pt", padding=True)
  probs = model(**inputs).logits_per_image.softmax(dim=1).detach().numpy().flatten()

  plt.subplots()
  plt.imshow(image)
- plt.title("".join([x[0] + ": " + x[1] + " " for x in zip(text, [format(prob, ".4%") for prob in probs])]))
+ plt.title("".join([x[0] + ": " + x[1] + "\n" for x in zip(text, [format(prob, ".4%") for prob in probs])]))
  plt.axis("off")
+ plt.tight_layout()
  plt.show()
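For reference, a sketch of the full scripts/pt_example.py as it stands after this commit, reassembled from the hunk above. Only the transformers import appears in the diff context; the remaining imports (requests, PIL, matplotlib) are assumed from the names the script uses.

import requests
import matplotlib.pyplot as plt
from PIL import Image
from transformers import CLIPProcessor, CLIPModel

# load the PubMed CLIP checkpoint and its processor
model = CLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

# download the sample image and define the candidate labels
url = "https://d168r5mdg5gtkq.cloudfront.net/medpix/img/full/synpic18755.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]

# score the image against each label and turn the logits into probabilities
inputs = processor(text=text, images=image, return_tensors="pt", padding=True)
probs = model(**inputs).logits_per_image.softmax(dim=1).detach().numpy().flatten()

# show the image with one "label: probability" line per candidate in the title
plt.subplots()
plt.imshow(image)
plt.title("".join([x[0] + ": " + x[1] + "\n" for x in zip(text, [format(prob, ".4%") for prob in probs])]))
plt.axis("off")
plt.tight_layout()
plt.show()

The change swaps the Google-hosted thumbnail for a MedPix image, adds a third candidate label, and stacks the title as one line per label (newline separator plus plt.tight_layout()) instead of a single space-separated line.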
scripts/tf_example.py CHANGED
@@ -8,15 +8,16 @@ from transformers import CLIPProcessor, TFCLIPModel
  model = TFCLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
  processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

- url = "https://encrypted-tbn3.gstatic.com/images?q=tbn:ANd9GcSjP8UWzpGqXKwlC1zPRhcJOXThfI4pXgg2Zhd1B-cstpnEDalY"
+ url = "https://d168r5mdg5gtkq.cloudfront.net/medpix/img/full/synpic18755.jpg"
  image = Image.open(requests.get(url, stream=True).raw)
- text = ["Chest X-Ray", "Brain MRI"]
+ text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]

  inputs = processor(text=text, images=image, return_tensors="tf", padding=True)
  probs = tf.nn.softmax(model(**inputs).logits_per_image, axis=-1).numpy().flatten()

  plt.subplots()
  plt.imshow(image)
- plt.title("".join([x[0] + ": " + x[1] + " " for x in zip(text, [format(prob, ".4%") for prob in probs])]))
+ plt.title("".join([x[0] + ": " + x[1] + "\n" for x in zip(text, [format(prob, ".4%") for prob in probs])]))
  plt.axis("off")
+ plt.tight_layout()
  plt.show()
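Likewise, a sketch of the complete scripts/tf_example.py after the commit, reassembled from the hunk above. As before, the import lines other than the transformers import are assumed (requests, tensorflow, PIL, matplotlib); the script itself mirrors the PyTorch example, using the model's TensorFlow class and tf.nn.softmax for the probabilities.

import requests
import tensorflow as tf
import matplotlib.pyplot as plt
from PIL import Image
from transformers import CLIPProcessor, TFCLIPModel

# load the TensorFlow variant of the PubMed CLIP checkpoint and its processor
model = TFCLIPModel.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("flaviagiammarino/pubmed-clip-vit-base-patch32")

# download the sample image and define the candidate labels
url = "https://d168r5mdg5gtkq.cloudfront.net/medpix/img/full/synpic18755.jpg"
image = Image.open(requests.get(url, stream=True).raw)
text = ["Chest X-Ray", "Brain MRI", "Abdominal CT Scan"]

# score the image against each label and turn the logits into probabilities
inputs = processor(text=text, images=image, return_tensors="tf", padding=True)
probs = tf.nn.softmax(model(**inputs).logits_per_image, axis=-1).numpy().flatten()

# show the image with one "label: probability" line per candidate in the title
plt.subplots()
plt.imshow(image)
plt.title("".join([x[0] + ": " + x[1] + "\n" for x in zip(text, [format(prob, ".4%") for prob in probs])]))
plt.axis("off")
plt.tight_layout()
plt.show()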