# (scraper residue removed: Hugging Face Space page header, commit hashes,
#  and a line-number gutter captured during extraction — not part of the app)
import os

import gradio as gr
import requests

# SECURITY: this Hugging Face API token was committed in plain text — it must be
# treated as compromised. Rotate it, then supply the replacement through the
# HF_API_TOKEN environment variable; the hardcoded value remains only as a
# backward-compatible fallback.
HF_API_TOKEN = os.environ.get("HF_API_TOKEN", "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")

# Text-generation endpoint (Falcon-7B-Instruct).
API_URL = "https://api-inference.huggingface.co/models/tiiuae/falcon-7b-instruct"
headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}

# Extractive question-answering endpoint (Longformer fine-tuned on SQuAD v1).
API_URL2 = "https://api-inference.huggingface.co/models/valhalla/longformer-base-4096-finetuned-squadv1"
headers2 = {"Authorization": f"Bearer {HF_API_TOKEN}"}
# UI for the first model. Fix: the original passed "question"/"context"/"answer"
# as the first positional argument of gr.Textbox, which is the component's
# *value*, not its label — these strings are clearly meant as labels.
model_1_interface = gr.Interface(
    fn=lambda question, context="": query(question, context, API_URL, headers),
    inputs=[gr.Textbox(label="question"), gr.Textbox(label="context")],
    outputs=gr.Textbox(label="answer"),
    title="Model 1 Interface",
    description="Ask the AI model anything!",
)
# UI for the second model. Fix: the original passed "question"/"context"/"answer"
# as the first positional argument of gr.Textbox, which is the component's
# *value*, not its label — these strings are clearly meant as labels.
model_2_interface = gr.Interface(
    fn=lambda question, context="": query(question, context, API_URL2, headers2),
    inputs=[gr.Textbox(label="question"), gr.Textbox(label="context")],
    outputs=gr.Textbox(label="answer"),
    title="Model 2 Interface",
    description="Ask the AI model anything!",
)
def query(question, context, api_url, headers):
    """POST a question (and optional context) to a HF Inference API endpoint.

    Args:
        question: the user's question text.
        context: supporting context text (only used for the API_URL branch).
        api_url: which inference endpoint to hit (API_URL or API_URL2).
        headers: auth headers for that endpoint.

    Returns:
        The "answer" field of the JSON response.

    Raises:
        requests.HTTPError: if the endpoint returns an error status.
        KeyError: if the response JSON has no "answer" key.
    """
    if api_url == API_URL:
        payload = {"question": question, "context": context}
    else:
        # Bug fix: this was misspelled `paylaod`, so `payload` was undefined
        # on this branch and the request below raised NameError.
        payload = {"question": "what is the context of the question: " + question + " :"}
    # NOTE(review): the branch/endpoint pairing looks inverted — the
    # question+context (SQuAD-style) payload is sent to the Falcon
    # text-generation endpoint, not the Longformer QA endpoint. Confirm intent
    # before swapping; left as-is to preserve the caller-visible behavior.
    response = requests.post(api_url, headers=headers, json=payload)
    # Surface HTTP errors (rate limits, model loading) instead of a confusing
    # KeyError on an error body.
    response.raise_for_status()
    # NOTE(review): text-generation models typically return
    # [{"generated_text": ...}], not {"answer": ...} — verify against the API.
    return response.json()["answer"]
def switch_model():
    """Toggle which of the two Gradio interfaces is running.

    NOTE(review): `gr.Interface.get_active()` is not a public Gradio API —
    calling this function will raise AttributeError. It also appears never to
    be wired to any UI event in this file. Confirm the intended switching
    mechanism (e.g. gr.Tabs / gr.TabbedInterface) before relying on it.
    """
    if gr.Interface.get_active() == model_1_interface:
        model_2_interface.launch()
    else:
        model_1_interface.launch()
# Launch only when executed as a script (Spaces runs this file as __main__),
# so merely importing the module no longer starts a server as a side effect.
if __name__ == "__main__":
    model_1_interface.launch()