Update app.py
app.py CHANGED

@@ -49,13 +49,11 @@ def main():
         inputs = tokenizer(query, text, return_tensors='tf')
         #outputs = qa_model(input_ids=inputs['input_ids'], attention_mask=inputs['attention_mask'])
         outputs = qa_model(input_ids=inputs['input_ids'])
-        loss = outputs.loss
         answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
         answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
         predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
-        answer=tokenizer.decode(predict_answer_tokens)
-
-        st.info(ans)
+        answer=tokenizer.decode(predict_answer_tokens)
+        st.info(answer)
 
     else:
 
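For context, here is a minimal sketch of how the question-answering block reads after this change. Everything outside the hunk (the imports, the checkpoint name, and the placeholder `query`/`text` strings) is an assumption for illustration, not taken from app.py:

```python
import tensorflow as tf
import streamlit as st
from transformers import AutoTokenizer, TFAutoModelForQuestionAnswering

# Assumed checkpoint; the real app may load a different model/tokenizer pair.
checkpoint = "distilbert-base-cased-distilled-squad"
tokenizer = AutoTokenizer.from_pretrained(checkpoint)
qa_model = TFAutoModelForQuestionAnswering.from_pretrained(checkpoint)

query = "Who wrote the passage?"          # placeholder user question
text = "The passage was written by ..."   # placeholder context passage

# Encode question + context together and run the extractive QA head.
inputs = tokenizer(query, text, return_tensors='tf')
outputs = qa_model(input_ids=inputs['input_ids'])

# Take the most likely start/end token positions, then decode the span between them.
answer_start_index = int(tf.math.argmax(outputs.start_logits, axis=-1)[0])
answer_end_index = int(tf.math.argmax(outputs.end_logits, axis=-1)[0])
predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
answer = tokenizer.decode(predict_answer_tokens)

# The fix in this commit: display the decoded `answer`, not the undefined `ans`.
st.info(answer)
```

Dropping `loss = outputs.loss` is harmless here: the model only returns a loss when `start_positions`/`end_positions` labels are passed, so at inference time that value was never meaningful.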