omarelsayeed committed on
Commit
44de40e
1 Parent(s): e0e65a3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +136 -54
app.py CHANGED
@@ -1,56 +1,138 @@
1
  import pandas as pd
2
- import spacy
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3
  import gradio as gr
4
- import re
5
- import json
6
- import random
7
-
8
- dataset = pd.read_csv('hatith_all_2.csv')
9
- nlp = spacy.load('aravec_model')
10
- all_docs = [nlp(doc) for doc in dataset['hadith']]
11
-
12
- def clean_text(text):
13
- # remove tashkeel
14
- text = re.sub('[~ًٌٍَُِّْ]', '', text)
15
- text = re.sub('[ًٌٍَُِّْـ]', '', text)
16
- # ozbot el alef
17
- text = re.sub('إ', 'ا', text)
18
- text = re.sub('أ', 'ا', text)
19
- text = re.sub('آ', 'ا', text)
20
- # remove longation
21
- text = re.sub(r'(.)\1+', r'\1\1', text)
22
- # remove extra spaces
23
- text = re.sub(' +', ' ', text)
24
- text = text.strip()
25
- text = re.sub('[\s]+', ' ', text)
26
- # remove punctuations
27
- text = re.sub(r'[^\w\s]', '', text)
28
- return text
29
-
30
- def get_similar_sentences(text):
31
- text = clean_text(text)
32
- ref_sentence = nlp(text)
33
- similar_sentences = []
34
- #sampled_docs = random.sample(all_docs , 15000)
35
- for i, doc in enumerate(all_docs):
36
- similarity_score = ref_sentence.similarity(doc)
37
- similar_sentences.append({
38
- "similar_sentence": dataset['reference'][i]+str(doc),
39
- "similarity_score": similarity_score,
40
- })
41
- similar_sentences.sort(key=lambda x: x['similarity_score'], reverse=True)
42
- top_10 = similar_sentences[:10]
43
- return top_10
44
-
45
- text_input = gr.inputs.Textbox(lines = 1 , label = "Enter a Quran Verse" )
46
-
47
- output_text = gr.JSON()
48
-
49
- examples = ['الحمدلله رب العالمين',
50
- 'مثلهم كمثل الذي استوقد نارًا فلما أضاءت ما حوله ذهب الله بنورهم وتركهم في ظلماتٍ لا يبصرون',
51
- 'إن الذين كفروا سواء عليهم أأنذرتهم أم لم تنذرهم لا يؤمنون',
52
- 'ونادى أصحاب الجنة أصحاب النار أن قد وجدنا ما وعدنا ربنا حقا فهل وجدتم ما وعد ربكم حقا ۖ قالوا نعم ۚ فأذن مؤذن بينهم أن لعنة الله على الظالمين'
53
- ]
54
-
55
- intf = gr.Interface(fn = get_similar_sentences , inputs = text_input , outputs =output_text, examples=examples )
56
- intf.launch(debug = True)
 
 
1
  import pandas as pd
2
+ from rank_bm25 import BM25Okapi
3
+ import numpy as np
4
+ from transformers import AutoTokenizer
5
+ from rank_bm25 import BM25Okapi
6
+ import numpy as np
7
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
8
+ from langchain.embeddings import HuggingFaceEmbeddings
9
+ from langchain.vectorstores import FAISS
10
+ import pandas as pd
11
+
12
# Build the working dataset from two label sources:
#   (1) rows that carry a human-provided `label`
#   (2) rows whose classifier label is high-confidence (proba > 0.9)
dataset = pd.read_csv("filtered_133k_data_cleanlab.csv")

human_labeled = dataset[['text', 'label', 'Chat_ID', 'x', 'y']].dropna()

classifier_labeled = dataset[
    ['text', 'classifier_label', 'Chat_ID', 'scores_proba_countvectr', 'x', 'y']
].dropna()
# Keep only confident classifier predictions.
classifier_labeled = classifier_labeled[classifier_labeled['scores_proba_countvectr'] > 0.9]
# Align the schema with the human-labeled frame.
classifier_labeled = classifier_labeled[['text', 'classifier_label', 'Chat_ID', 'x', 'y']].rename(
    columns={'classifier_label': 'label'}
)

# Merge both sources and shuffle the row order.
dataset = pd.concat((human_labeled, classifier_labeled)).reset_index(drop=True)
dataset = dataset.sample(frac=1).reset_index(drop=True)
21
+
22
+
23
+
24
class KeyWordSearch:
    """BM25 keyword search over the 'text' column of a corpus DataFrame."""

    def __init__(self, corpus: pd.DataFrame, tokenizer=None):
        """
        corpus: DataFrame with a 'text' column of documents to index.
        tokenizer: optional tokenizer; currently unused — documents are
            whitespace-split. NOTE(review): wire it into the split below
            if sub-word tokenization is ever needed.
        """
        self.corpus = corpus
        self.tokenizer = tokenizer
        # BM25Okapi expects a pre-tokenized corpus: one token list per document.
        self.tokenized_corpus = [doc.split(" ") for doc in self.corpus['text']]
        self.search_engine = BM25Okapi(self.tokenized_corpus)

    def get_top_10(self, query):
        """Return {document_text: bm25_score} for the 10 best-matching docs.

        Duplicate texts among the top 10 collapse to a single dict key
        (last score wins), matching the original behavior.
        """
        tokenized_query = query.split(" ")
        scores = self.search_engine.get_scores(tokenized_query)
        # argsort is ascending; reverse for best-first, then take the top 10.
        # (Replaces the original manual reversed-iteration/break loop.)
        top_indices = np.argsort(scores)[::-1][:10]
        top_texts = self.corpus['text'].iloc[top_indices]
        # Build the result dict directly — no intermediate DataFrame needed.
        return dict(zip(top_texts.tolist(), scores[top_indices].tolist()))
55
+
56
class VectorSearch:
    """Dense (embedding-based) retrieval over a text corpus via a FAISS index."""

    def __init__(self, corpus):
        """
        corpus: list of raw text strings to index.
        """
        # Chunk long texts so each indexed document stays a manageable size.
        splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=150)
        self.text_splitter = splitter
        self.docs = splitter.create_documents(corpus)

        # CPU-only sentence embeddings from the fine-tuned MNR model.
        self.embeddings = HuggingFaceEmbeddings(
            model_name="omarelsayeed/bert_large_mnr",
            model_kwargs={'device': 'cpu'},
            encode_kwargs={'normalize_embeddings': False},
        )

        self.db = FAISS.from_documents(self.docs, self.embeddings)
        self.retriever = self.db.as_retriever()

    def search_query(self, query):
        """Return a pair: (DataFrame of [chunk_text, score] for the 10 most
        similar chunks, top-10 max-marginal-relevance search results)."""
        scored = self.db.similarity_search_with_score(query, k=10)
        similarity_table = pd.DataFrame([[doc.page_content, score] for doc, score in scored])
        mmr_results = self.db.max_marginal_relevance_search(query, k=10, return_score=True)
        return (similarity_table, mmr_results)
84
  import gradio as gr
85
+ import pandas as pd
86
+
87
# Dataset reloaded for the UI layer (feeds the label dropdown and
# per-label corpus filtering in VCC.filter_corpus).
df = pd.read_csv('filtered_133k_data_cleanlab.csv')

class CurrentLabel:
    """Module-level cache key: the label the current semantic index was built for."""
    # None until the first semantic search builds an index.
    current_label = None
90
+
91
class VCC:
    """Dispatch a query to BM25 keyword search or FAISS semantic search.

    The semantic (FAISS) index is built lazily and cached; it is rebuilt
    only when the selected label changes, since embedding the corpus is
    expensive.
    """

    def __init__(self):
        self.vcc = None            # cached VectorSearch for the current label
        self.current_label = None  # kept for backward compatibility (unused)

    def filter_corpus(self, label, search_query, search_method):
        """
        label: dataset label used to filter the corpus (`df['label']`).
        search_query: user query string.
        search_method: "BM25" or "Semantic".

        Returns {text: score} for the top-10 hits, or a message string
        when the method is unrecognized.
        """
        corpus = df[df['label'] == label]

        if search_method == "BM25":
            # Build the BM25 index only on this branch — the original built it
            # unconditionally, wasting work on every semantic query.
            return KeyWordSearch(corpus).get_top_10(search_query)

        if search_method == "Semantic":
            # Rebuild the index when the label changed, and also guard against
            # a missing index (self.vcc is None) so a stale module-level
            # CurrentLabel can never leave us dereferencing None.
            if self.vcc is None or CurrentLabel.current_label != label:
                CurrentLabel.current_label = label
                self.vcc = VectorSearch(corpus.text.tolist())

            results = self.vcc.db.similarity_search_with_score(search_query, k=10)
            # Gradio's Label output wants plain-float scores keyed by text;
            # duplicate texts collapse (last score wins), as before.
            return {doc.page_content: float(score) for doc, score in results}

        # Unknown search method — preserve the original fallback message.
        return "No results found."
121
+
122
# Wire the search dispatcher into a Gradio UI:
# a label selector, a free-text query box, and a retrieval-method toggle.
v = VCC()

ui_inputs = [
    gr.inputs.Dropdown(choices=list(df['label'].unique()), label="Select Label"),
    gr.inputs.Textbox(label="Search Query"),
    gr.inputs.Radio(["BM25", "Semantic"], label="Search Method"),
]

search_interface = gr.Interface(
    fn=v.filter_corpus,
    inputs=ui_inputs,
    outputs=gr.outputs.Label(label="Search Results"),
    title="Search and Filter Corpus",
)

search_interface.launch()