# Hadiths Finder — semantic hadith search app (Hugging Face Spaces / Gradio).
import functools
import re
from pathlib import Path

import gradio as gr
import pandas as pd
import torch
from sentence_transformers import SentenceTransformer
#import pickle
# Map a hadith collection's display name to its sunnah.com URL slug.
def categorize_source(source):
    """Return the sunnah.com collection slug for *source*.

    Matches by substring, in a fixed priority order; returns '' when no
    known collection name is found in the string.
    """
    slug_markers = (
        ("Nasa'i", 'nasai'),
        ("Majah", 'ibnmajah'),
        ("Da'ud", 'abudawud'),
        ("Muslim", 'muslim'),
        ("Bukhari", 'bukhari'),
        ("Tirmidhi", 'tirmidhi'),
    )
    for marker, slug in slug_markers:
        if marker in source:
            return slug
    return ''
@functools.lru_cache(maxsize=1)
def _load_resources():
    """Load the hadith corpus, embedding model, and precomputed document
    embeddings exactly once.

    Cached so repeated queries do not re-read the CSV, re-download the
    SentenceTransformer, or re-load the embedding tensor (the original
    code did all three on every call).

    Returns:
        (hadiths_df, model, document_embeddings) tuple.
    """
    hadiths = pd.read_csv('all_hadiths_clean.csv', delimiter=",")
    model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')
    # NOTE(review): despite the .sav name this is a torch-serialized tensor;
    # force CPU since Spaces CPU hardware has no CUDA.
    document_embeddings = torch.load(
        'encoded_hadiths_multilingual-e5-large-instruct (1).sav',
        map_location='cpu')
    return hadiths, model, document_embeddings


def find(query):
    """Semantic search over the hadith corpus for *query*.

    Embeds the query with the E5 instruct prompt format, ranks all
    documents by (scaled) cosine similarity, and keeps the top 20.

    Args:
        query: free-text search string (any language the model supports).

    Returns:
        (results, filepath): the top-20 results DataFrame (with ``text_ar``
        wrapped in a sunnah.com link) and the Path of a CSV export of it.
    """
    def get_detailed_instruct(task_description: str, query: str) -> str:
        # E5-instruct models expect this exact prompt template for queries.
        return f'Instruct: {task_description}\nQuery: {query}'

    task = 'Given a web search query, retrieve relevant passages that answer the query'
    queries = [get_detailed_instruct(task, query)]

    hadiths, model, document_embeddings = _load_resources()
    # Work on a copy so the cached corpus DataFrame is never mutated.
    hadiths = hadiths.copy()

    query_embeddings = model.encode(queries, convert_to_tensor=True,
                                    normalize_embeddings=True)
    # Embeddings are normalized, so the dot product is cosine similarity;
    # x100 only rescales for readability and does not affect ranking.
    scores = (query_embeddings @ document_embeddings.T) * 100

    # Attach similarity per document (single query -> first row of scores),
    # rank, and keep the 20 best matches.
    hadiths['similarity'] = scores.tolist()[0]
    sorted_hadiths = hadiths.sort_values(by='similarity', ascending=False)
    results = sorted_hadiths.head(20).drop(columns=['id', 'hadith_id', 'chain_indx'])

    # Build sunnah.com links and render the Arabic text as an anchor
    # (the Dataframe output treats the text_ar column as markdown/HTML).
    results['source_cat'] = results['source'].apply(categorize_source)
    results['hadith_no'] = results['hadith_no'].str.strip()
    url = ('https://sunnah.com/' + results['source_cat'].astype(str)
           + ':' + results['hadith_no'].astype(str))
    results['text_ar'] = '<a href="' + url + '">' + results['text_ar'] + '</a>'
    results = results.drop(columns=['source_cat'])

    # Sanitize the query before using it as a filename: a raw query with
    # '/', '\\' or other reserved characters would make to_csv() fail.
    safe_name = re.sub(r'[\\/:*?"<>|]', '_', query).strip() or 'results'
    filepath = Path(safe_name + '.csv')
    results.to_csv(filepath, index=False)
    return results, filepath
# Gradio UI: one textbox in; a results table plus a download button out.
# The text_ar column is declared "markdown" so the <a href=...> links that
# find() injects render as clickable anchors.
demo = gr.Interface(
    fn=find,
    inputs="textbox",
    outputs=[
        gr.Dataframe(
            headers=['source', 'chapter_no', 'hadith_no', 'chapter', 'text_ar', 'text_en'],
            datatype=["str", "str", "str", "str", "markdown", "str"],
            wrap=True),
        gr.DownloadButton()],
    examples=[
        ["law of inheritance in islam"],
        ["tunjukilah jalan yang lurus"],
        ["ุณููู ุงู"],
    ],
    title="Hadiths Finder")
#demo = gr.Interface(fn=find, inputs="textbox", outputs="textbox")
# Launch the app only when run as a script (Spaces imports this module too).
if __name__ == "__main__":
    demo.launch()