import streamlit as st
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = 'deepset/xlm-roberta-large-squad2'


# Cache the pipeline so the model is loaded once, not on every Streamlit rerun.
@st.cache_resource
def load_qa_pipeline():
    model = AutoModelForQuestionAnswering.from_pretrained(model_name)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return pipeline('question-answering', model=model, tokenizer=tokenizer)


# a) Get predictions
ctx = st.text_area('Context')
q = st.text_area('Ask your question :)')
if ctx and q:
    nlp = load_qa_pipeline()
    # Example input (from the model card):
    # QA_input = {
    #     'question': 'Why is model conversion important?',
    #     'context': 'The option to convert models between FARM and transformers '
    #                'gives freedom to the user and lets people easily switch '
    #                'between frameworks.'
    # }
    res = nlp(context=ctx, question=q)
    st.json(res)
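
# b) Batch predictions over a dataset: a sketch adapted from the commented-out
# variant above, not part of the interactive app. Assumptions beyond the active
# code: a pandas DataFrame `test_df` with 'context' and 'question' columns, and
# a CUDA GPU available for device=0 (drop that argument to stay on CPU). As the
# original note says, batches might be faster than per-row calls; the pipeline
# also accepts a list of {'question': ..., 'context': ...} dicts in one call.
def predict_answers(test_df):
    qa_pl = pipeline('question-answering', model=model_name,
                     tokenizer=model_name, device=0)
    predictions = []
    for ctx_, q_ in test_df[['context', 'question']].to_numpy():
        result = qa_pl(context=ctx_, question=q_)
        predictions.append(result['answer'])
    return predictions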