import json
import streamlit as st
import requests as req
from transformers import pipeline

# Wikipedia API request pieces: plain-text extract of the BERT article.
WIKI_URL = 'https://en.wikipedia.org/w/api.php'
WIKI_QUERY = "?format=json&action=query&prop=extracts&explaintext=1"
WIKI_BERT = "&titles=BERT_(language_model)"
WIKI_METHOD = 'GET'

# Extractive QA model; "distilbert-base-cased-distilled-squad" is a lighter alternative.
DPR_MODEL = "deepset/roberta-base-squad2"
pipe_exqa = pipeline("question-answering", model=DPR_MODEL)

st.title('Question Answering example')
st.subheader('1. A simple question (extractive, closed domain)')

# Fetch the BERT Wikipedia article and use its plain-text extract as the default passage.
response = req.request(WIKI_METHOD, f'{WIKI_URL}{WIKI_QUERY}{WIKI_BERT}')
resp_json = json.loads(response.content.decode("utf-8"))
# Take the first (and only) page in the response rather than hard-coding its page id.
wiki_page = next(iter(resp_json['query']['pages'].values()))
paragraph = wiki_page['extract']

par_text = 'Paragraph used for QA (you can also edit, or copy/paste new content)'
written_passage = st.text_area(par_text, paragraph, height=250)
if written_passage:
    paragraph = written_passage

question = 'How many attention heads does BERT have?'
# question = 'How many languages does BERT understand?'
query_text = 'Question used for QA (you can also edit, and experiment with the answers)'
written_question = st.text_input(query_text, question)
if written_question:
    question = written_question

if st.button('Run QA inference (get answer prediction)'):
    if paragraph and question:
        try:
            qa_result = pipe_exqa(question=question, context=paragraph)
        except Exception as e:
            # Keep the result a dict so the checks below do not fail on an error string.
            qa_result = {"error": str(e)}

        if "answer" in qa_result:
            answer_span, answer_score = qa_result["answer"], qa_result["score"]
            st.write(f'Answer: **{answer_span}**')

            # Show the answer inside a window of surrounding context, with the span highlighted.
            start_par = max(0, qa_result["start"] - 86)
            stop_par = min(qa_result["end"] + 90, len(paragraph))
            answer_context = paragraph[start_par:stop_par].replace(answer_span, f'**{answer_span}**')
            st.write(f'Answer context (and score): ... _{answer_context}_ ... (score: {answer_score:.3f})')

        st.write('Answer JSON: ')
        st.write(qa_result)
    else:
        st.write('Write some passage of text and a question')
        st.stop()
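
# Usage note (a minimal sketch, assuming the script is saved as qa_app.py -- a
# hypothetical filename -- and that streamlit, transformers, torch and requests
# are installed):
#
#   pip install streamlit transformers torch requests
#   streamlit run qa_app.py
#
# Streamlit re-runs the whole script on every interaction, so in a larger app the
# pipeline() call would typically be wrapped with st.cache_resource to avoid
# reloading the model on each rerun.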