remove cuda
Browse files
print debug language
- image.py +3 -4
- language.py +2 -2
image.py
CHANGED
@@ -15,10 +15,9 @@ def image(url, text):
|
|
15 |
outputs = model(**encoding)
|
16 |
logits = outputs.logits
|
17 |
idx = logits.argmax(-1).item()
|
18 |
-
|
19 |
-
|
20 |
-
|
21 |
-
|
22 |
return model.config.id2label[idx]
|
23 |
|
24 |
# prepare image + question
|
|
|
15 |
outputs = model(**encoding)
|
16 |
logits = outputs.logits
|
17 |
idx = logits.argmax(-1).item()
|
18 |
+
print("question asked:", text)
|
19 |
+
print("image link:", url)
|
20 |
+
print("Predicted answer:", model.config.id2label[idx])
|
|
|
21 |
return model.config.id2label[idx]
|
22 |
|
23 |
# prepare image + question
|
language.py
CHANGED
@@ -6,8 +6,8 @@ def longText(answere, question):
|
|
6 |
input_text = "i have a question and answer.\nthe question is : {}\n the response is : {}\n with this information, can you create an answer phrase?".format(question, answere)
|
7 |
|
8 |
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
9 |
-
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large"
|
10 |
-
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
11 |
outputs = model.generate(input_ids)
|
12 |
|
13 |
return tokenizer.decode(outputs[0]).replace("<pad>","").replace("</s>","")
|
|
|
6 |
input_text = "i have a question and answer.\nthe question is : {}\n the response is : {}\n with this information, can you create an answer phrase?".format(question, answere)
|
7 |
|
8 |
tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
|
9 |
+
model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")
|
10 |
+
input_ids = tokenizer(input_text, return_tensors="pt").input_ids
|
11 |
outputs = model.generate(input_ids)
|
12 |
|
13 |
return tokenizer.decode(outputs[0]).replace("<pad>","").replace("</s>","")
|