Dolly_demo / app.py
shams1992's picture
Update app.py
22f340e
raw
history blame contribute delete
813 Bytes
from transformers import AutoModelForQuestionAnswering, AutoTokenizer, pipeline
import gradio as gr
import torch
# Build the inference pipeline once at module import time (model download +
# weight loading happens here, so startup is slow but each request is fast).
#
# NOTE(review): "databricks/dolly-v2-3b" is a causal instruction-following LM
# (text-generation), but it is being loaded under the extractive
# 'question-answering' task, which expects a model with a QA head
# (AutoModelForQuestionAnswering). Confirm this pairing actually loads and
# that the pipeline returns an 'answer' key as the caller below assumes.
nlp = pipeline('question-answering',
model="databricks/dolly-v2-3b",
torch_dtype=torch.bfloat16,  # halves memory vs float32 on supporting hardware
trust_remote_code=True,  # allows the repo's custom model code to execute
device_map="auto")  # let accelerate place weights across available devices
def qnamodel(context, question):
    """Answer *question* from *context* via the module-level QA pipeline.

    Args:
        context: Passage of text to search for the answer.
        question: Question to answer from the passage.

    Returns:
        The answer string extracted by the pipeline.
    """
    prediction = nlp({'context': context, 'question': question})
    return prediction['answer']
# Wire the QA function into a simple two-input Gradio UI.
# Fix: the gr.inputs / gr.outputs namespaces were deprecated in Gradio 2.x and
# removed in 3.x+ — components are constructed directly (gr.Textbox) with the
# same lines/label arguments, keeping identical UI behavior.
interface = gr.Interface(
    fn=qnamodel,
    inputs=[
        gr.Textbox(lines=7, label="Context"),
        gr.Textbox(lines=2, label="Question"),
    ],
    outputs=gr.Textbox(label="Answer"),
    title='Context Question Answering',
)
# inline=False: open as a standalone page rather than embedding in a notebook.
interface.launch(inline=False)