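# app.py - Gradio app that uploads a document to a Vectara corpus and then queries it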
import gradio as gr
import requests
import json
import os
from decouple import config  # python-decouple exposes a lowercase `config` callable

# Function to interact with the Vectara API: upload the file, then query the corpus
def query_vectara(question, uploaded_file):
    # Read the Vectara credentials from the .env file
    customer_id = config('CUSTOMER_ID')
    corpus_id = config('CORPUS_ID')
    api_key = config('API_KEY')

    # Handle file upload to Vectara; gr.File may return a file-like object
    # (older Gradio releases) or a plain file path, so normalise to a path first
    file_path = uploaded_file.name if hasattr(uploaded_file, "name") else uploaded_file
    url = f"https://api.vectara.io/v1/upload?c={customer_id}&o={corpus_id}"
    post_headers = {
        "x-api-key": api_key,
        "customer-id": customer_id
    }
    with open(file_path, "rb") as f:
        files = {
            "file": (os.path.basename(file_path), f),
            "doc_metadata": (None, json.dumps({"metadata_key": "metadata_value"})),  # Replace with your metadata
        }
        response = requests.post(url, files=files, verify=True, headers=post_headers)

    if response.status_code == 200:
        upload_status = "File uploaded successfully"
    else:
        upload_status = "Failed to upload the file"

    # Query the corpus with the user's question
    query_body = {
        "query": [
            {
                "query": question,  # Use the user's question as the query
                "start": 0,
                "numResults": 10,
                "corpusKey": [
                    {
                        "customerId": customer_id,
                        "corpusId": corpus_id,
                        "lexicalInterpolationConfig": {"lambda": 0.025}
                    }
                ]
            }
        ]
    }
    api_endpoint = "https://api.vectara.io/v1/query"
    query_response = requests.post(api_endpoint, json=query_body, verify=True, headers=post_headers)
    return f"{upload_status}\n\nResponse from Vectara API: {query_response.text}"


# Create a Gradio interface
iface = gr.Interface(
    fn=query_vectara,
    inputs=[
        gr.Textbox(label="Ask a question:"),
        gr.File(label="Upload a file")
    ],
    outputs=gr.Textbox(),
    examples=[
        # Example questions only pre-fill the textbox; a file still has to be uploaded
        ["Hello", None],
        ["What is the weather today?", None],
        ["Tell me a joke", None]
    ],
    title="Vectara Chatbot",
    description="Ask me anything using the Vectara API!"
)

iface.launch()
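
# The app expects CUSTOMER_ID, CORPUS_ID and API_KEY to be provided via a .env
# file (or environment variables) readable by python-decouple.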