LayBraid committed
Commit: c78b520
Parent: 998ea00

update app

Files changed (1): app.py (+2 -1)
app.py CHANGED

@@ -11,6 +11,7 @@ processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
 cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
 
 text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in cifar100.classes])
+text_inputs_2 = ["a photo of a dog", "a photo of a cat"]
 
 
 # TODO: debug this line to get a correct display
@@ -20,7 +21,7 @@ text_inputs = torch.cat([clip.tokenize(f"a photo of a {c}") for c in cifar100.cl
 
 
 def send_inputs(img):
-    inputs = processor(text=cifar100.classes, images=img, return_tensors="pt", padding=True)
+    inputs = processor(text=text_inputs_2, images=img, return_tensors="pt", padding=True)
     outputs = model(**inputs)
     logits_per_image = outputs.logits_per_image
     probs = logits_per_image.softmax(dim=1)
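
For context, send_inputs follows the standard Hugging Face transformers zero-shot CLIP pattern: the processor tokenizes the text prompts and preprocesses the image in one call, and logits_per_image holds one similarity score per prompt. Below is a minimal runnable sketch of the function after this commit. The model/processor setup is taken from the hunk headers above; the unused clip.tokenize line is omitted, and the return statement plus the CIFAR-100 demo call are illustrative assumptions, not part of the diff.

import os

import torch
from torchvision.datasets import CIFAR100
from transformers import CLIPModel, CLIPProcessor

# Model and processor, as named in the hunk header of this diff.
model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

# Hard-coded prompts introduced by this commit.
text_inputs_2 = ["a photo of a dog", "a photo of a cat"]

def send_inputs(img):
    # Tokenize the prompts and preprocess the image in a single call.
    inputs = processor(text=text_inputs_2, images=img, return_tensors="pt", padding=True)
    with torch.no_grad():
        outputs = model(**inputs)
    # One similarity logit per (image, prompt) pair; softmax over prompts.
    logits_per_image = outputs.logits_per_image
    probs = logits_per_image.softmax(dim=1)
    return probs  # assumption: the app presumably consumes these probabilities

# Illustrative usage: score one CIFAR-100 test image against the two prompts.
cifar100 = CIFAR100(root=os.path.expanduser("~/.cache"), download=True, train=False)
image, _ = cifar100[0]  # CIFAR100 yields (PIL image, label) pairs
print(send_inputs(image))  # e.g. tensor([[p_dog, p_cat]])

Passing text=cifar100.classes, as the removed line did, would instead score the image against prompts for all 100 CIFAR-100 class names rather than the two hard-coded ones.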