AnonymousSub committed
Commit 627fbe3 (parent: df847c3)
Update app.py
app.py
CHANGED
@@ -38,7 +38,7 @@ def generate_answer_git(processor, model, image, question):
     input_ids = [processor.tokenizer.cls_token_id] + input_ids
     input_ids = torch.tensor(input_ids).unsqueeze(0)

-    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=50)
+    generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=128)#50)
     generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)

     return generated_answer
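For context, a minimal sketch of how a GIT-based VQA helper like the one changed above is typically driven with the transformers API. The checkpoint name (microsoft/git-base-textvqa), the image path, and the question are illustrative assumptions, not taken from this Space.

# Minimal sketch, not from this commit: checkpoint and inputs are assumed.
from PIL import Image
import torch
from transformers import AutoProcessor, AutoModelForCausalLM

processor = AutoProcessor.from_pretrained("microsoft/git-base-textvqa")   # assumed checkpoint
model = AutoModelForCausalLM.from_pretrained("microsoft/git-base-textvqa")

image = Image.open("example.jpg").convert("RGB")   # placeholder image
question = "what is shown in the picture?"

# GIT takes the image as pixel_values and the question as input_ids,
# prefixed with the CLS token (mirroring lines 38-39 above).
pixel_values = processor(images=image, return_tensors="pt").pixel_values
input_ids = processor(text=question, add_special_tokens=False).input_ids
input_ids = [processor.tokenizer.cls_token_id] + input_ids
input_ids = torch.tensor(input_ids).unsqueeze(0)

generated_ids = model.generate(pixel_values=pixel_values, input_ids=input_ids, max_length=128)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))

The commit itself only raises max_length from 50 to 128 so that longer generated answers are not cut off.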
@@ -48,7 +48,7 @@ def generate_answer_blip(processor, model, image, question):
     # prepare image + question
     inputs = processor(images=image, text=question, return_tensors="pt")

-    generated_ids = model.generate(**inputs, max_length=50)
+    generated_ids = model.generate(**inputs, max_length=128)#50)
     generated_answer = processor.batch_decode(generated_ids, skip_special_tokens=True)

     return generated_answer
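Similarly, a minimal sketch of the BLIP path; the Salesforce/blip-vqa-base checkpoint and the example inputs are assumptions for illustration only.

# Minimal sketch, not from this commit: checkpoint and inputs are assumed.
from PIL import Image
from transformers import BlipProcessor, BlipForQuestionAnswering

processor = BlipProcessor.from_pretrained("Salesforce/blip-vqa-base")   # assumed checkpoint
model = BlipForQuestionAnswering.from_pretrained("Salesforce/blip-vqa-base")

image = Image.open("example.jpg").convert("RGB")   # placeholder image
question = "how many dogs are in the picture?"

# BLIP encodes image + question in one processor call and generates the answer
# autoregressively, so the same max_length bump applies here.
inputs = processor(images=image, text=question, return_tensors="pt")
generated_ids = model.generate(**inputs, max_length=128)
print(processor.batch_decode(generated_ids, skip_special_tokens=True))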
@@ -56,10 +56,10 @@ def generate_answer_blip(processor, model, image, question):


 def generate_answer_vilt(processor, model, image, question):
     # prepare image + question
-    encoding = processor(images=image, text=question, return_tensors="pt")
+    encoding = processor(images=image, text=question, max_length=128, return_tensors="pt")

     with torch.no_grad():
-        outputs = model(**encoding
+        outputs = model(**encoding)

     predicted_class_idx = outputs.logits.argmax(-1).item()
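ViLT, by contrast, treats VQA as classification over a fixed answer vocabulary rather than free-form generation. A minimal sketch follows, with the dandelin/vilt-b32-finetuned-vqa checkpoint and the example inputs assumed for illustration.

# Minimal sketch, not from this commit: checkpoint and inputs are assumed.
from PIL import Image
import torch
from transformers import ViltProcessor, ViltForQuestionAnswering

processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")   # assumed checkpoint
model = ViltForQuestionAnswering.from_pretrained("dandelin/vilt-b32-finetuned-vqa")

image = Image.open("example.jpg").convert("RGB")   # placeholder image
question = "what color is the cat?"

# ViLT scores every answer in its label set; take the argmax and map it back
# to a string, as the function above does with predicted_class_idx.
encoding = processor(images=image, text=question, return_tensors="pt")
with torch.no_grad():
    outputs = model(**encoding)
predicted_class_idx = outputs.logits.argmax(-1).item()
print(model.config.id2label[predicted_class_idx])

The second changed line in this hunk also closes the model(**encoding) call, which appears unbalanced in the old revision.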