import streamlit as st
from transformers import pipeline

# Load the extractive question-answering pipeline. Passing the model name for both
# model and tokenizer lets the pipeline resolve and download them from the Hub.
# (A smaller, English-only alternative is "deepset/roberta-base-squad2".)
# Equivalently, the model and tokenizer can be loaded explicitly:
#   from transformers import AutoModelForQuestionAnswering, AutoTokenizer
#   model = AutoModelForQuestionAnswering.from_pretrained(model_name)
#   tokenizer = AutoTokenizer.from_pretrained(model_name)
model_name = "deepset/xlm-roberta-base-squad2"
nlp = pipeline("question-answering", model=model_name, tokenizer=model_name)
# Example pipeline input:
# QA_input = {
#     "question": "Why is model conversion important?",
#     "context": "The option to convert models between FARM and transformers gives "
#                "freedom to the user and lets people easily switch between frameworks.",
# }
# res = nlp(QA_input)
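
# For reference, the question-answering pipeline returns a dict of the form
# {"score": ..., "start": ..., "end": ..., "answer": ...}, where "start" and "end"
# are character offsets of the extracted answer span within the context.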


def main():
    st.title("Question & Answering")

    # Collect the question and context in a form so the model only runs on submit.
    with st.form("text_field"):
        sentence_1 = st.text_area("Enter question:")
        sentence_2 = st.text_area("Enter context:")
        QA_input = {"question": sentence_1, "context": sentence_2}
        # clicked is True only on the rerun triggered by pressing the button
        clicked = st.form_submit_button("Submit")

    if clicked:
        results = nlp(QA_input)
        st.json(results)


if __name__ == "__main__":
    main()
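
# To run the app locally (assuming streamlit and transformers are installed):
#   streamlit run app.py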