"""Extractive question-answering demo: DistilBERT (SQuAD-distilled) behind a Gradio UI."""

from transformers import pipeline, AutoTokenizer, AutoModelForQuestionAnswering
import gradio as gr
import time

# Author information
author = "Chris Choodai"

# Load tokenizer/model once at import time so every request reuses them.
tokenizer = AutoTokenizer.from_pretrained("distilbert-base-cased-distilled-squad")
model = AutoModelForQuestionAnswering.from_pretrained("distilbert-base-cased-distilled-squad")
qa_pipe = pipeline("question-answering", model=model, tokenizer=tokenizer)


def response(context, question):
    """Return the answer span the QA pipeline extracts from *context* for *question*.

    Parameters
    ----------
    context : str
        The passage of text to search for an answer.
    question : str
        The question to answer from *context*.

    Returns
    -------
    str
        The extracted answer text (the pipeline's ``'answer'`` field).
    """
    result = qa_pipe(context=context, question=question)
    return result['answer']


input_context = gr.Textbox(lines=10, label='Input Context', placeholder='Enter context here...')
input_question = gr.Textbox(label='Input Question', placeholder='Ask your question here...')
output_text = gr.Textbox(label="Response", placeholder='Response will display here..')

interface = gr.Interface(
    response,
    inputs=[input_context, input_question],
    outputs=output_text,
    # BUG FIX: the original used a plain string here, so "{author}" was never
    # interpolated and would have displayed literally; an f-string fixes that.
    # The string was also left unterminated in the mangled original.
    title=f"""
Enter context and question to get the response.
Developed by {author}.""",
)

# Guard the launch so importing this module (e.g. for testing) has no side effects.
if __name__ == "__main__":
    interface.launch()