Add print debugging
Changed files: language.py (+6 −0)
language.py
CHANGED
@@ -3,10 +3,16 @@ from transformers import T5Tokenizer, T5ForConditionalGeneration
|
|
3 |
|
4 |
def longText(answere, question):
    """Rephrase a question/answer pair into a full answer sentence with FLAN-T5.

    Parameters
    ----------
    answere : str
        The raw answer text (original parameter name kept for callers).
    question : str
        The question text.

    Returns
    -------
    str
        The decoded model output. NOTE(review): decoded without
        ``skip_special_tokens=True``, so ``<pad>``/``</s>`` markers may
        appear in the result — confirm whether callers expect that.
    """
    # BUG FIX: the original used commas, which built a *tuple* of strings
    # instead of one prompt string; the tokenizer then treated it as a batch
    # of unrelated texts. Assemble a single prompt with an f-string.
    input_text = (
        f"i have a question and answer.\nthe question is :{question}"
        f"\n the response is :{answere}"
        "\n with this information, can you create an answer phrase?"
    )

    # NOTE(review): loading the tokenizer and model on every call is very
    # slow (disk/network); hoist to module level if called repeatedly.
    tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
    model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")

    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    outputs = model.generate(input_ids)
    return tokenizer.decode(outputs[0])
|
|
|
3 |
|
4 |
def longText(answere, question):
    """Rephrase a question/answer pair into a full answer sentence with FLAN-T5.

    Prints the inputs, the assembled prompt, and the decoded output for
    debugging, then returns the decoded output.

    Parameters
    ----------
    answere : str
        The raw answer text (original parameter name kept for callers).
    question : str
        The question text.

    Returns
    -------
    str
        The decoded model output. NOTE(review): decoded without
        ``skip_special_tokens=True``, so ``<pad>``/``</s>`` markers may
        appear in the result — confirm whether callers expect that.
    """
    # Debug tracing of the incoming pair.
    print(answere)
    print(question)

    # BUG FIX: the original used commas, which built a *tuple* of strings
    # instead of one prompt string; the tokenizer then treated it as a batch
    # of unrelated texts. Assemble a single prompt with an f-string.
    input_text = (
        f"i have a question and answer.\nthe question is :{question}"
        f"\n the response is :{answere}"
        "\n with this information, can you create an answer phrase?"
    )
    print(input_text)

    # NOTE(review): loading the tokenizer and model on every call is very
    # slow (disk/network); hoist to module level if called repeatedly.
    tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-large")
    model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-large")

    input_ids = tokenizer(input_text, return_tensors="pt").input_ids
    outputs = model.generate(input_ids)

    # Decode once and reuse — the original decoded the same sequence twice
    # (once for the debug print, once for the return).
    decoded = tokenizer.decode(outputs[0])
    print(decoded)
    return decoded
|