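# Hadiths Finder: a Gradio app that retrieves hadiths relevant to a free-text query
# using multilingual-e5-large-instruct sentence embeddings and links results to sunnah.com.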
import gradio as gr
from sentence_transformers import SentenceTransformer
import pandas as pd
import torch
from pathlib import Path  

# Map a hadith source name to its sunnah.com collection slug
def categorize_source(source):
    if "Nasa'i" in source:
        return 'nasai'
    elif "Majah" in source:
        return 'ibnmajah'
    elif "Da'ud" in source:
        return 'abudawud'
    elif "Muslim" in source:
        return 'muslim'
    elif "Bukhari" in source:
        return 'bukhari'
    elif "Tirmidhi" in source:
        return 'tirmidhi'
    else:
        return ''

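# Embed the query, score it against the pre-computed hadith embeddings, and return the top matches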
def find(query):
    def get_detailed_instruct(task_description: str, query: str) -> str:
        return f'Instruct: {task_description}\nQuery: {query}'
    
    # Each query must come with a one-sentence instruction that describes the task
    task = 'Given a web search query, retrieve relevant passages that answer the query'
    queries = [
        get_detailed_instruct(task, query)
    ]
    print("checkpoint 0")
    
    # Load the corpus, the embedding model, and the pre-computed hadith embeddings
    # (done on every call for simplicity)
    hadiths = pd.read_csv('all_hadiths_clean.csv', delimiter=",")

    model = SentenceTransformer('intfloat/multilingual-e5-large-instruct')

    document_embeddings = torch.load('encoded_hadiths_multilingual-e5-large-instruct (1).sav', map_location='cpu')
    print("checkpoint 1")
    
    # Embed the query; with normalized embeddings the dot product is cosine similarity (scaled by 100)
    query_embeddings = model.encode(queries, convert_to_tensor=True, normalize_embeddings=True)
    scores = (query_embeddings @ document_embeddings.T) * 100
    print("checkpoint 2")

    # Attach the similarity scores to the dataframe and sort by relevance
    hadiths['similarity'] = scores.tolist()[0]
    sorted_hadiths = hadiths.sort_values(by='similarity', ascending=False)
    print("checkpoint 3")
    
    # Keep the 20 most similar hadiths and drop internal columns
    results = sorted_hadiths.head(20).drop(columns=['id', 'hadith_id', 'chain_indx'])
    results['source_cat'] = results['source'].apply(categorize_source)
    results['hadith_no'] = results['hadith_no'].str.strip()

    # Link each Arabic text to its page on sunnah.com
    url = 'https://sunnah.com/' + results['source_cat'].astype(str) + ':' + results['hadith_no'].astype(str)
    results['text_ar'] = '<a href="' + url + '">' + results['text_ar'] + '</a>'
    results = results.drop(columns=['source_cat'])

    # Write the results to a CSV named after the query so they can be downloaded from the UI
    filepath = Path(query + '.csv')
    results.to_csv(filepath, index=False)
    return results, filepath
    
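# Gradio UI: a query textbox, a results table with clickable Arabic text, and a download button for the CSV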
demo = gr.Interface(
    fn=find,
    inputs="textbox",
    outputs=[
        gr.Dataframe(
            headers=['source', 'chapter_no', 'hadith_no', 'chapter', 'text_ar', 'text_en'],
            datatype=["str", "str", "str", "str", "markdown", "str"],
            wrap=True,
        ),
        gr.DownloadButton(),
    ],
    examples=[
        ["law of inheritance in islam"],
        ["tunjukilah jalan yang lurus"],
        ["سليمان"],
    ],
    title="Hadiths Finder",
)
    
if __name__ == "__main__":
    demo.launch()