import streamlit as st
import requests
import os
from streamlit_chat import message
import random
def query(payload):
    # query the Hugging Face Inference API with the question-answering model
    api_token = os.getenv("api_token")
    model_id = "deepset/roberta-base-squad2"
    headers = {"Authorization": f"Bearer {api_token}"}
    API_URL = f"https://api-inference.huggingface.co/models/{model_id}"
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json(), response
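# Rough sketch of what query() is expected to return for this extractive QA model
# (field names follow the standard question-answering output; the question and the
# values below are made up for illustration, not taken from a real API call):
#   data, resp = query({"inputs": {"question": "I want to analyze tweets", "context": context}})
#   data == {"score": 0.41, "start": 57, "end": 76, "answer": "text classification"}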
context = "To extract information from documents, use sentence similarity task. To do sentiment analysis from tweets, use text classification task. To detect masks from images, use object detection task. To extract information from invoices, use named entity recognition from token classification task." | |
message_history = [{"text": "Let's find out the best task for your use case! Tell me about your use case :)", "is_user": False}]
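# streamlit_chat's message() renders a chat bubble; is_user controls whether the
# bubble is displayed as the user's message or the bot's.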
for msg in message_history:
    message(msg["text"], is_user=msg["is_user"])  # display all the previous messages
input = st.text_input("Ask me 🤗")
message_history.append({"text": input, "is_user": True})

placeholder = st.empty()  # placeholder for latest message
data, resp = query(
    {
        "inputs": {
            "question": input,
            "context": context,
        }
    }
)
if resp.status_code == 200:
    model_answer = data["answer"]
    response_templates = [
        f"{model_answer} is the best task for this 🤩",
        f"I think you should use {model_answer} 💪",
        f"I think {model_answer} should work for you 🤔",
    ]
    bot_answer = random.choice(response_templates)
    message_history.append({"text": bot_answer, "is_user": False})
with placeholder.container():
    last_message = message_history[-1]
    if last_message["text"] != "":  # skip the empty input from the initial run
        message(last_message["text"], last_message["is_user"])  # display the latest message
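# To try this locally (a sketch; "app.py" is just an assumed filename and the token
# name matches the api_token environment variable read above):
#   pip install streamlit streamlit-chat requests
#   export api_token=<your Hugging Face access token>
#   streamlit run app.py
# On a Hugging Face Space, api_token would instead be set as a Space secret, which is
# exposed to the app as an environment variable.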