import gradio as gr
import spaces
import torch  # fix: was referenced below but never imported (NameError at startup)
from transformers import pipeline

# Informational device report only — `pipeline` picks its own device
# unless explicitly told otherwise.
if torch.cuda.is_available():
    print("Running on GPU")
else:
    print("Running on CPU")

# Extractive QA pipeline; model is a DistilBERT fine-tuned on SQuAD v2.
# Loaded once at module import so every request reuses the same weights.
pipe = pipeline(
    task="question-answering",
    model="decodingchris/distilbert-base-uncased-finetuned-squad-v2",
)


@spaces.GPU  # requests a GPU slice on Hugging Face ZeroGPU Spaces for each call
def get_answer(context, question):
    """Return the answer span the model extracts from `context` for `question`.

    Args:
        context: Passage of text to search for the answer.
        question: Natural-language question about the passage.

    Returns:
        The extracted answer string (the pipeline's `answer` field).
    """
    result = pipe(context=context, question=question)
    return result["answer"]


# Two inputs (multi-line context, single-line question), one labeled text output.
demo = gr.Interface(
    fn=get_answer,
    inputs=["textarea", "text"],
    outputs=gr.Textbox(label="answer"),
)

demo.launch()