import gradio as gr
from sentence_transformers import SentenceTransformer
import pinecone
import os
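
# Sentence-transformer model used to embed user queries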
retriever = SentenceTransformer("sentence-transformers/all-MiniLM-L12-v2")

pinecone_key = os.environ['PINECONE_SECRET']
pinecone.init(
    api_key=pinecone_key,
    environment="eu-west1-gcp"
)
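
# Connect to the existing Pinecone index that stores the Quran embeddings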
index_name = 'quran-semantic-search'
index = pinecone.Index(index_name)

def query_pinecone(query, top_k):
    # Embed the query text with the sentence-transformer model
    xq = retriever.encode([query]).tolist()
    # Search the vector database for the most similar vectors
    xc = index.query(xq, top_k=top_k, include_metadata=True)
    return xc['matches']

def format_search_result(result):
    # Build a readable chat message from the match metadata
    data = result['metadata']
    message = (f"Ayah no: {data['ayah']}\nSurah no: {data['sorah']}\nSentence: {data['arabic-text']}\n"
               f"Translation: {data['en-translation']}\nTafsir: {data['en-tafsir-mokhtasar']}\n"
               f"Relevant Tafsir: {data['vector-chunk']}")
    return message

with gr.Blocks() as demo:
    gr.Markdown(
        """
        # Quran Semantic Search!
        Ask questions or seek advice, and see what the Holy Book has to say about it.
        """)
    chatbot = gr.Chatbot()
    msg = gr.Textbox()
    clear = gr.Button("Clear")

    def user(user_message, history):
        # Append the user's message to the chat history and clear the textbox
        return "", history + [[user_message, None]]

    def bot(history):
        # Retrieve the top matches for the latest user message and post each
        # one as a separate bot reply
        query = history[-1][0]
        results = query_pinecone(query, top_k=3)
        for match in results:
            if history[-1][1] is None:
                history[-1][1] = format_search_result(match)
            else:
                history.append([None, format_search_result(match)])
        return history

    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()