"""Gradio demo for extractive question answering.

Loads a SQuAD-finetuned BERT model via the HuggingFace ``pipeline`` API and
exposes a two-input (context, question) -> answer web UI.
"""

import gradio as gr
# NOTE(review): AutoModelForQuestionAnswering and AutoTokenizer are imported but
# unused — the pipeline() call below loads both from `model_name` itself.
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline

model_name = "bert-large-uncased-whole-word-masking-finetuned-squad"

# Build the QA pipeline once at module load; the model is large, so startup
# is slow but every request afterwards reuses the same loaded weights.
nlp = pipeline('question-answering', model=model_name, tokenizer=model_name)


def chat(context, question):
    """Extract the answer to `question` from the user-supplied `context`.

    Parameters
    ----------
    context : str
        The passage of text to search for an answer.
    question : str
        The question to answer from `context`.

    Returns
    -------
    str
        The answer span the model extracted from `context`.
    """
    QA_input = {
        "question": question,
        "context": context,
    }
    res = nlp(QA_input)
    # The pipeline also returns 'score', 'start', 'end'; only the text is shown.
    return res['answer']


# Two stacked text inputs (context, then question) mapped to one text output.
screen = gr.Interface(
    fn=chat,
    inputs=[
        gr.Textbox(lines=8, placeholder="Enter your context here 👉"),
        gr.Textbox(lines=2, placeholder="Enter your question here 👉"),
    ],
    outputs=gr.Textbox(lines=15, placeholder="Your answer will be here soon 🚀"),
    title="Facilitating the QnA with bert-large-uncased-whole-word-masking-finetuned-squad 👩🏻‍💻📓✍🏻💡",
    description="This app aims to facilitate the simple QnA with the provided context💡",
    theme="soft",
    article="""### Disclaimer : This model is purely used for QnA. User is expected to paste the text from which they want the answer in context section.
       Then paste the question in the question section.
       User will get the answer in the output section.""",
)

# Guard the launch so importing this module (e.g. for testing) does not
# immediately start the web server.
if __name__ == "__main__":
    screen.launch()