Chris4K committed on
Commit 8ad24b7 · verified · 1 Parent(s): a8ae89c

Update app.py

Files changed (1): app.py (+330 −266)
app.py CHANGED
@@ -5,39 +5,82 @@ import docx
import nltk
import gradio as gr
from langchain_huggingface import HuggingFaceEmbeddings
- from langchain_community.embeddings import (
-     OpenAIEmbeddings,
-     CohereEmbeddings,
- )
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS, Chroma
- from langchain_text_splitters import (
-     RecursiveCharacterTextSplitter,
-     TokenTextSplitter,
- )
from typing import List, Dict, Any
import pandas as pd
-
- nltk.download('punkt', quiet=True)

FILES_DIR = './files'

- MODELS = {
-     'HuggingFace': {
-         'e5-base-de': "danielheinz/e5-base-sts-en-de",
-         'paraphrase-miniLM': "paraphrase-multilingual-MiniLM-L12-v2",
-         'paraphrase-mpnet': "paraphrase-multilingual-mpnet-base-v2",
-         'gte-large': "gte-large",
-         'gbert-base': "gbert-base"
-     },
-     'OpenAI': {
-         'text-embedding-ada-002': "text-embedding-ada-002"
-     },
-     'Cohere': {
-         'embed-multilingual-v2.0': "embed-multilingual-v2.0"
-     }
- }

class FileHandler:
    @staticmethod
    def extract_text(file_path):
@@ -66,13 +109,69 @@ class FileHandler:
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
def get_embedding_model(model_type, model_name):
    if model_type == 'HuggingFace':
-         return HuggingFaceEmbeddings(model_name=MODELS[model_type][model_name])
    elif model_type == 'OpenAI':
-         return OpenAIEmbeddings(model=MODELS[model_type][model_name])
    elif model_type == 'Cohere':
-         return CohereEmbeddings(model=MODELS[model_type][model_name])
    else:
        raise ValueError(f"Unsupported model type: {model_type}")
@@ -88,23 +187,39 @@ def get_text_splitter(split_strategy, chunk_size, overlap_size, custom_separator
    else:
        raise ValueError(f"Unsupported split strategy: {split_strategy}")

- def get_vector_store(store_type, texts, embedding_model):
-     if store_type == 'FAISS':
-         return FAISS.from_texts(texts, embedding_model)
-     elif store_type == 'Chroma':
-         return Chroma.from_texts(texts, embedding_model)
    else:
-         raise ValueError(f"Unsupported vector store type: {store_type}")

- def get_retriever(vector_store, search_type, search_kwargs=None):
    if search_type == 'similarity':
        return vector_store.as_retriever(search_type="similarity", search_kwargs=search_kwargs)
    elif search_type == 'mmr':
        return vector_store.as_retriever(search_type="mmr", search_kwargs=search_kwargs)
    else:
        raise ValueError(f"Unsupported search type: {search_type}")

- def process_files(file_path, model_type, model_name, split_strategy, chunk_size, overlap_size, custom_separators):
    if file_path:
        text = FileHandler.extract_text(file_path)
    else:
@@ -112,6 +227,12 @@ def process_files(file_path, model_type, model_name, split_strategy, chunk_size,
        for file in os.listdir(FILES_DIR):
            file_path = os.path.join(FILES_DIR, file)
            text += FileHandler.extract_text(file_path)

    text_splitter = get_text_splitter(split_strategy, chunk_size, overlap_size, custom_separators)
    chunks = text_splitter.split_text(text)
@@ -120,28 +241,105 @@ def process_files(file_path, model_type, model_name, split_strategy, chunk_size,

    return chunks, embedding_model, len(text.split())
- def search_embeddings(chunks, embedding_model, vector_store_type, search_type, query, top_k):
    vector_store = get_vector_store(vector_store_type, chunks, embedding_model)
    retriever = get_retriever(vector_store, search_type, {"k": top_k})

    start_time = time.time()
-     results = retriever.get_relevant_documents(query)
-     end_time = time.time()

-     return results, end_time - start_time, vector_store

- def calculate_statistics(results, search_time, vector_store, num_tokens, embedding_model):
-     return {
        "num_results": len(results),
-         "avg_content_length": sum(len(doc.page_content) for doc in results) / len(results) if results else 0,
        "search_time": search_time,
        "vector_store_size": vector_store._index.ntotal if hasattr(vector_store, '_index') else "N/A",
        "num_documents": len(vector_store.docstore._dict),
        "num_tokens": num_tokens,
-         "embedding_vocab_size": embedding_model.client.get_vocab_size() if hasattr(embedding_model, 'client') and hasattr(embedding_model.client, 'get_vocab_size') else "N/A"
    }
- def compare_embeddings(file, query, model_types, model_names, split_strategy, chunk_size, overlap_size, custom_separators, vector_store_type, search_type, top_k):
    all_results = []
    all_stats = []
    settings = {
@@ -151,7 +349,11 @@ def compare_embeddings(file, query, model_types, model_names, split_strategy, ch
        "custom_separators": custom_separators,
        "vector_store_type": vector_store_type,
        "search_type": search_type,
-         "top_k": top_k
    }
    for model_type, model_name in zip(model_types, model_names):
@@ -162,19 +364,31 @@ def compare_embeddings(file, query, model_types, model_names, split_strategy, ch
            split_strategy,
            chunk_size,
            overlap_size,
-             custom_separators.split(',') if custom_separators else None
        )

-         results, search_time, vector_store = search_embeddings(
            chunks,
            embedding_model,
            vector_store_type,
            search_type,
            query,
-             top_k
        )

-         stats = calculate_statistics(results, search_time, vector_store, num_tokens, embedding_model)
        stats["model"] = f"{model_type} - {model_name}"
        stats.update(settings)

@@ -185,231 +399,81 @@ def compare_embeddings(file, query, model_types, model_names, split_strategy, ch
    results_df = pd.DataFrame(all_results)
    stats_df = pd.DataFrame(all_stats)

-     return results_df, stats_df

def format_results(results, stats):
    formatted_results = []
    for doc in results:
        result = {
-             "Content": doc.page_content,
            "Model": stats["model"],
            **doc.metadata,
            **{k: v for k, v in stats.items() if k not in ["model"]}
        }
        formatted_results.append(result)
    return formatted_results
- # Gradio interface
- iface = gr.Interface(
-     fn=compare_embeddings,
-     inputs=[
-         gr.File(label="Upload File (Optional)"),
-         gr.Textbox(label="Search Query"),
-         gr.CheckboxGroup(choices=list(MODELS.keys()), label="Embedding Model Types", value=["HuggingFace"]),
-         gr.CheckboxGroup(choices=[model for models in MODELS.values() for model in models], label="Embedding Models", value=["e5-base-de"]),
-         gr.Radio(choices=["token", "recursive"], label="Split Strategy", value="recursive"),
-         gr.Slider(100, 1000, step=100, value=500, label="Chunk Size"),
-         gr.Slider(0, 100, step=10, value=50, label="Overlap Size"),
-         gr.Textbox(label="Custom Split Separators (comma-separated, optional)"),
-         gr.Radio(choices=["FAISS", "Chroma"], label="Vector Store Type", value="FAISS"),
-         gr.Radio(choices=["similarity", "mmr"], label="Search Type", value="similarity"),
-         gr.Slider(1, 10, step=1, value=5, label="Top K")
-     ],
-     outputs=[
-         gr.Dataframe(label="Results"),
-         gr.Dataframe(label="Statistics")
-     ],
-     title="Embedding Comparison Tool",
-     description="Compare different embedding models and retrieval strategies",
-     examples=[
-         ["files/test.txt", "What is machine learning?", ["HuggingFace"], ["e5-base-de"], "recursive", 500, 50, "", "FAISS", "similarity", 5]
-     ],
-     flagging_mode="never"
- )
-
- # The code remains the same as in the previous artifact, so I'll omit it here for brevity.
- # The changes will be in the tutorial_md variable.
-
- tutorial_md = """
- # Embedding Comparison Tool Tutorial
-
- This tool allows you to compare different embedding models and retrieval strategies for document search. Before diving into how to use the tool, let's cover some important concepts.
-
- ## What is RAG?
-
- RAG stands for Retrieval-Augmented Generation. It's a technique that combines the strengths of large language models with the ability to access and use external knowledge. RAG is particularly useful for:
-
- - Providing up-to-date information
- - Answering questions based on specific documents or data sources
- - Reducing hallucinations in AI responses
- - Customizing AI outputs for specific domains or use cases
-
- RAG suits applications that need accurate, context-specific information retrieval combined with natural language generation, such as chatbots, question-answering systems, and document analysis tools.
-
- ## Key Components of RAG
-
- ### 1. Document Loading
-
- This is the process of ingesting documents from various sources (PDFs, web pages, databases, etc.) into a format the RAG system can process. Efficient document loading is crucial for handling large volumes of data.
-
- ### 2. Document Splitting
-
- Large documents are often split into smaller chunks for more efficient processing and retrieval. The choice of splitting method can significantly affect the quality of retrieval results.
-
- ### 3. Vector Store and Embeddings
-
- Embeddings are dense vector representations of text that capture semantic meaning. A vector store is a database optimized for storing and querying these high-dimensional vectors. Together, they allow for efficient semantic search.
-
- ### 4. Retrieval
-
- This is the process of finding the most relevant documents or chunks for a query. The quality of retrieval directly affects the final output of the RAG system.
-
- ## Why is this important?
-
- Understanding and optimizing each component of the RAG pipeline is crucial because:
-
- 1. It affects the accuracy and relevance of the information retrieved.
- 2. It impacts the speed and efficiency of the system.
- 3. It determines the scalability of your solution.
- 4. It influences the overall quality of the generated responses.
-
- ## Impact of Parameter Changes
-
- Changes in various parameters can have significant effects:
-
- - **Chunk Size**: Larger chunks provide more context but may reduce precision. Smaller chunks increase precision but may lose context.
- - **Overlap**: More overlap can help maintain context between chunks but increases computational load.
- - **Embedding Model**: Different models have varying performance across languages and domains.
- - **Vector Store**: Affects query speed and the types of searches you can perform.
- - **Retrieval Method**: Impacts the diversity and relevance of retrieved documents.
-
- ## Detailed Parameter Explanations
-
- ### Embedding Model
-
- The embedding model translates text into numerical vectors. The choice of model affects:
-
- - **Language Coverage**: Some models are monolingual, others are multilingual.
- - **Domain Specificity**: Models can be general or trained on specific domains (e.g., legal, medical).
- - **Vector Dimensions**: Higher dimensions can capture more information but require more storage and computation.
-
- #### Vocabulary Size
-
- The vocab size refers to the number of unique tokens the model recognizes. It's important because:
-
- - It affects the model's ability to handle rare words or specialized terminology.
- - Larger vocabs can lead to better performance but require more memory.
- - It impacts the model's performance across different languages (larger vocabs are often better for multilingual models).
-
- ### Split Strategy
-
- - **Token**: Splits on a fixed number of tokens. Good for maintaining consistent chunk sizes (see the sketch below).
- - **Recursive**: Splits based on content, trying to maintain semantic coherence. Better for preserving context.
-
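A minimal sketch of the two strategies with the LangChain splitters already imported at the top of app.py (parameter values are illustrative):

```python
from langchain_text_splitters import RecursiveCharacterTextSplitter, TokenTextSplitter

text = "Ein langer Beispieltext. " * 100
token_chunks = TokenTextSplitter(chunk_size=500, chunk_overlap=50).split_text(text)
recursive_chunks = RecursiveCharacterTextSplitter(chunk_size=500, chunk_overlap=50).split_text(text)
```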
- ### Vector Store Type
-
- - **FAISS**: Fast and memory-efficient. Good for large-scale similarity search.
- - **Chroma**: Offers additional features like metadata filtering. Good for more complex querying needs.
-
- ### Search Type
-
- - **Similarity**: Returns the most similar documents. Fast and straightforward.
- - **MMR (Maximum Marginal Relevance)**: Balances relevance with diversity in results. Useful for getting a broader perspective.
-
- ## MTEB (Massive Text Embedding Benchmark)
-
- MTEB is a comprehensive benchmark for evaluating text embedding models across a wide range of tasks and languages. It's useful for:
-
- - Comparing the performance of different embedding models.
- - Understanding how models perform on specific tasks (e.g., classification, clustering, retrieval).
- - Selecting the best model for your specific use case.
-
- ### Finding Embeddings on the MTEB Leaderboard
-
- To find suitable embeddings using the MTEB leaderboard (https://huggingface.co/spaces/mteb/leaderboard):
-
- 1. Look at the "Avg" column for overall performance across all tasks.
- 2. Check performance on the task types relevant to your use case (e.g., Retrieval, Classification).
- 3. Consider the model size and inference speed for your deployment constraints.
- 4. Look at language-specific scores if you're working with non-English text.
- 5. Click on model names for more details and links to the model pages on Hugging Face.
-
- When selecting a model, balance performance against practical considerations like model size, inference speed, and performance on the tasks relevant to your application.
-
- By understanding these concepts and parameters, you can make informed decisions when using the Embedding Comparison Tool and optimize your RAG system for your specific needs.
-
- ## Using the Embedding Comparison Tool
-
- Now that you understand the underlying concepts, here's how to use the tool:
-
- 1. **File Upload**: Optionally upload a file (PDF, DOCX, or TXT), or leave it empty to use the files in the `./files` directory.
-
- 2. **Search Query**: Enter the search query to use for retrieving relevant documents.
-
- 3. **Embedding Model Types**: Select one or more embedding model types (HuggingFace, OpenAI, Cohere).
-
- 4. **Embedding Models**: Choose specific models for each selected model type.
-
- 5. **Split Strategy**: Select either 'token' or 'recursive' for text splitting.
-
- 6. **Chunk Size**: Set the size of text chunks (100-1000).
-
- 7. **Overlap Size**: Set the overlap between chunks (0-100).
-
- 8. **Custom Split Separators**: Optionally enter custom separators for text splitting.
-
- 9. **Vector Store Type**: Choose between FAISS and Chroma for storing vectors.
-
- 10. **Search Type**: Select 'similarity' or 'mmr' (Maximum Marginal Relevance) search.
-
- 11. **Top K**: Set the number of top results to retrieve (1-10).
-
- After setting these parameters, click "Submit" to run the comparison. The results are displayed in two tables:
-
- - **Results**: Shows the retrieved document contents and metadata for each model.
- - **Statistics**: Provides performance metrics and settings for each model.
-
- You can download the results as CSV files for further analysis.
-
- ## Useful Resources and Links
-
- Here are some valuable resources for working with embeddings, retrieval systems, and natural language processing:
-
- ### Embeddings and Vector Databases
- - [Understanding Embeddings](https://www.tensorflow.org/text/guide/word_embeddings): A guide by TensorFlow on word embeddings
- - [FAISS: A Library for Efficient Similarity Search](https://github.com/facebookresearch/faiss): Facebook AI's vector similarity search library
- - [Chroma: The AI-native open-source embedding database](https://www.trychroma.com/): An embedding database designed for AI applications
-
- ### Natural Language Processing
- - [NLTK (Natural Language Toolkit)](https://www.nltk.org/): A leading platform for building Python programs to work with human language data
- - [spaCy](https://spacy.io/): Industrial-strength Natural Language Processing in Python
- - [Hugging Face Transformers](https://huggingface.co/transformers/): State-of-the-art Natural Language Processing for PyTorch and TensorFlow 2.0
-
- ### Retrieval-Augmented Generation (RAG)
- - [LangChain](https://python.langchain.com/docs/get_started/introduction): A framework for developing applications powered by language models
- - [OpenAI's RAG Tutorial](https://platform.openai.com/docs/tutorials/web-qa-embeddings): A guide on building a QA system with embeddings
-
- ### German Language Processing
- - [Kölner Phonetik](https://en.wikipedia.org/wiki/Cologne_phonetics): Information about the Kölner Phonetik algorithm
- - [German NLP Resources](https://github.com/adbar/German-NLP): A curated list of open-access resources for German NLP
-
- ### Benchmarks and Evaluation
- - [MTEB Leaderboard](https://huggingface.co/spaces/mteb/leaderboard): Massive Text Embedding Benchmark leaderboard
- - [GLUE Benchmark](https://gluebenchmark.com/): General Language Understanding Evaluation benchmark
-
- ### Tools and Libraries
- - [Gensim](https://radimrehurek.com/gensim/): Topic modelling for humans
- - [Sentence-Transformers](https://www.sbert.net/): A Python framework for state-of-the-art sentence, text and image embeddings
-
- Experiment with different settings to find the best combination for your specific use case!
- """
-
- # The rest of the code remains the same
- iface = gr.TabbedInterface(
-     [iface, gr.Markdown(tutorial_md)],
-     ["Embedding Comparison", "Tutorial"]
- )
-
- iface.launch(share=True)
 
import nltk
import gradio as gr
from langchain_huggingface import HuggingFaceEmbeddings
+ from langchain_community.embeddings import CohereEmbeddings
from langchain_openai import OpenAIEmbeddings
from langchain_community.vectorstores import FAISS, Chroma
+ from langchain_text_splitters import RecursiveCharacterTextSplitter, TokenTextSplitter
from typing import List, Dict, Any
import pandas as pd
+ import numpy as np
+ import re
+ from nltk.corpus import stopwords
+ from nltk.tokenize import word_tokenize
+ from nltk.stem import SnowballStemmer
+ import jellyfish
+ from gensim.models import Word2Vec
+ from gensim.models.fasttext import FastText
+ from collections import Counter
+ from tokenizers import Tokenizer
+ from tokenizers.models import WordLevel
+ from tokenizers.trainers import WordLevelTrainer
+ from tokenizers.pre_tokenizers import Whitespace
+ import matplotlib.pyplot as plt
+ import seaborn as sns
+ from sklearn.manifold import TSNE
+ from sklearn.metrics import silhouette_score
+ from scipy.stats import spearmanr
+ from functools import lru_cache
+
+ # NLTK Resource Download
+ def download_nltk_resources():
+     resources = ['punkt', 'stopwords', 'snowball_data']
+     for resource in resources:
+         try:
+             nltk.download(resource, quiet=True)
+         except Exception as e:
+             print(f"Failed to download {resource}: {str(e)}")
+
+ download_nltk_resources()
FILES_DIR = './files'

+ # Model Management
+ class ModelManager:
+     def __init__(self):
+         self.models = {
+             'HuggingFace': {
+                 'e5-base-de': "danielheinz/e5-base-sts-en-de",
+                 'paraphrase-miniLM': "paraphrase-multilingual-MiniLM-L12-v2",
+                 'paraphrase-mpnet': "paraphrase-multilingual-mpnet-base-v2",
+                 'gte-large': "gte-large",
+                 'gbert-base': "gbert-base"
+             },
+             'OpenAI': {
+                 'text-embedding-ada-002': "text-embedding-ada-002"
+             },
+             'Cohere': {
+                 'embed-multilingual-v2.0': "embed-multilingual-v2.0"
+             }
+         }
+
+     def add_model(self, provider, name, model_path):
+         if provider not in self.models:
+             self.models[provider] = {}
+         self.models[provider][name] = model_path
+
+     def remove_model(self, provider, name):
+         if provider in self.models and name in self.models[provider]:
+             del self.models[provider][name]
+
+     def get_model(self, provider, name):
+         return self.models.get(provider, {}).get(name)
+
+     def list_models(self):
+         return {provider: list(models.keys()) for provider, models in self.models.items()}
+
+ model_manager = ModelManager()
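# A minimal usage sketch of ModelManager (the model name and path below are
# hypothetical, not part of the commit):
#
#   model_manager.add_model('HuggingFace', 'my-model', "org/my-model-path")
#   model_manager.get_model('HuggingFace', 'my-model')    # -> "org/my-model-path"
#   model_manager.list_models()                           # -> {'HuggingFace': [...], 'OpenAI': [...], 'Cohere': [...]}
#   model_manager.remove_model('HuggingFace', 'my-model')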
+
+ # File Handling
class FileHandler:
    @staticmethod
    def extract_text(file_path):
        with open(file_path, 'r', encoding='utf-8') as f:
            return f.read()
+ # Text Processing
+ def simple_tokenize(text):
+     return text.split()
+
+ def preprocess_text(text, lang='german'):
+     text = text.lower()
+     # Keep German umlauts and ß in addition to ASCII letters so German words
+     # survive the cleanup.
+     text = re.sub(r'[^a-zäöüß\s]', '', text)
+
+     try:
+         tokens = word_tokenize(text, language=lang)
+     except LookupError:
+         print(f"Warning: NLTK punkt tokenizer for {lang} not found. Using simple tokenization.")
+         tokens = simple_tokenize(text)
+
+     try:
+         stop_words = set(stopwords.words(lang))
+     except LookupError:
+         print(f"Warning: Stopwords for {lang} not found. Skipping stopword removal.")
+         stop_words = set()
+     tokens = [token for token in tokens if token not in stop_words]
+
+     try:
+         stemmer = SnowballStemmer(lang)
+         tokens = [stemmer.stem(token) for token in tokens]
+     except ValueError:
+         print(f"Warning: SnowballStemmer for {lang} not available. Skipping stemming.")
+
+     return ' '.join(tokens)
+
+ def phonetic_match(text, query, method='levenshtein_distance'):
+     if method == 'levenshtein_distance':
+         text_phonetic = jellyfish.soundex(text)
+         # query_phonetic = jellyfish.cologne_phonetic(query)
+         query_phonetic = jellyfish.soundex(query)
+         return jellyfish.levenshtein_distance(text_phonetic, query_phonetic)
+     return 0
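# What the phonetic path computes, shown on two spellings of the same surname
# (codes verifiable with jellyfish; the words are illustrative):
#
#   jellyfish.soundex("Meier")   # -> 'M600'
#   jellyfish.soundex("Mayr")    # -> 'M600'
#   jellyfish.levenshtein_distance('M600', 'M600')   # -> 0; a distance, so 0 = closest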
+
+ # Custom Tokenizer
+ def create_custom_tokenizer(file_path):
+     with open(file_path, 'r', encoding='utf-8') as f:
+         text = f.read()
+
+     tokenizer = Tokenizer(WordLevel(unk_token="[UNK]"))
+     tokenizer.pre_tokenizer = Whitespace()
+
+     trainer = WordLevelTrainer(special_tokens=["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]"])
+     tokenizer.train_from_iterator([text], trainer)
+
+     return tokenizer
+
+ def custom_tokenize(text, tokenizer):
+     return tokenizer.encode(text).tokens
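# A sketch of the trained tokenizer in use, assuming a corpus file exists at the
# hypothetical path below:
#
#   tok = create_custom_tokenizer("files/corpus.txt")
#   custom_tokenize("ein neuer Satz", tok)
#   # -> ['ein', 'neuer', 'Satz'], with words absent from the corpus mapped to '[UNK]'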
+
+ # Embedding and Vector Store
+ @lru_cache(maxsize=None)
def get_embedding_model(model_type, model_name):
+     model_path = model_manager.get_model(model_type, model_name)
    if model_type == 'HuggingFace':
+         return HuggingFaceEmbeddings(model_name=model_path)
    elif model_type == 'OpenAI':
+         return OpenAIEmbeddings(model=model_path)
    elif model_type == 'Cohere':
+         return CohereEmbeddings(model=model_path)
    else:
        raise ValueError(f"Unsupported model type: {model_type}")

    else:
        raise ValueError(f"Unsupported split strategy: {split_strategy}")

+ def get_vector_store(vector_store_type, chunks, embedding_model):
+     # Convert chunks to a tuple so the argument is hashable (only relevant if the
+     # helper below is ever cached).
+     chunks_tuple = tuple(chunks)
+
+     # Use a helper function for the actual vector store creation
+     return _create_vector_store(vector_store_type, chunks_tuple, embedding_model)
+
+ def _create_vector_store(vector_store_type, chunks_tuple, embedding_model):
+     # Convert the tuple back to a list for use with the vector store
+     chunks = list(chunks_tuple)
+
+     if vector_store_type == 'FAISS':
+         return FAISS.from_texts(chunks, embedding_model)
+     elif vector_store_type == 'Chroma':
+         return Chroma.from_texts(chunks, embedding_model)
    else:
+         raise ValueError(f"Unsupported vector store type: {vector_store_type}")

+ def get_retriever(vector_store, search_type, search_kwargs):
    if search_type == 'similarity':
        return vector_store.as_retriever(search_type="similarity", search_kwargs=search_kwargs)
    elif search_type == 'mmr':
        return vector_store.as_retriever(search_type="mmr", search_kwargs=search_kwargs)
+     elif search_type == 'custom':
+         # Custom retrieval is not implemented yet; fail loudly rather than
+         # silently returning None.
+         raise NotImplementedError("Custom search type is not implemented yet")
    else:
        raise ValueError(f"Unsupported search type: {search_type}")

+ # Main Processing Functions
+ def process_files(file_path, model_type, model_name, split_strategy, chunk_size, overlap_size, custom_separators, lang='german', custom_tokenizer_file=None):
    if file_path:
        text = FileHandler.extract_text(file_path)
    else:
        for file in os.listdir(FILES_DIR):
            file_path = os.path.join(FILES_DIR, file)
            text += FileHandler.extract_text(file_path)
+
+     if custom_tokenizer_file:
+         tokenizer = create_custom_tokenizer(custom_tokenizer_file)
+         text = ' '.join(custom_tokenize(text, tokenizer))
+     else:
+         text = preprocess_text(text, lang)

    text_splitter = get_text_splitter(split_strategy, chunk_size, overlap_size, custom_separators)
    chunks = text_splitter.split_text(text)

    return chunks, embedding_model, len(text.split())
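# A direct invocation sketch (the file path and model choice are hypothetical):
#
#   chunks, embedding_model, num_tokens = process_files(
#       "files/test.txt", 'HuggingFace', 'e5-base-de',
#       "recursive", 500, 50, None)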

+ def search_embeddings(chunks, embedding_model, vector_store_type, search_type, query, top_k, lang='german', phonetic_weight=0.3):
+     preprocessed_query = preprocess_text(query, lang)
+
    vector_store = get_vector_store(vector_store_type, chunks, embedding_model)
    retriever = get_retriever(vector_store, search_type, {"k": top_k})

    start_time = time.time()
+     results = retriever.invoke(preprocessed_query)
+
+     def score_result(doc):
+         similarity_score = vector_store.similarity_search_with_score(doc.page_content, k=1)[0][1]
+         phonetic_score = phonetic_match(doc.page_content, query)
+         # Both components are distances (lower = closer), so the combined score is a distance too.
+         return (1 - phonetic_weight) * similarity_score + phonetic_weight * phonetic_score
+
+     # Sort ascending: the combined score is a distance, so the smallest values are the best matches.
+     results = sorted(results, key=score_result)
+     end_time = time.time()
+
+     # Keep the raw Document list for the statistics step before converting to a DataFrame.
+     results_raw = results
+
+     # Extract embeddings for each result and store them in the DataFrame
+     embeddings = [embedding_model.embed_query(doc.page_content) for doc in results]
+
+     # Create a DataFrame with the results and embeddings
+     results_df = pd.DataFrame({
+         'content': [doc.page_content for doc in results],
+         'embedding': embeddings
+     })
+
+     return results_df, end_time - start_time, vector_store, results_raw
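# The combined score above is a weighted sum of two distances; worked through with
# phonetic_weight=0.3 and illustrative numbers:
#
#   vector distance 0.8, phonetic distance 2
#   (1 - 0.3) * 0.8 + 0.3 * 2 = 0.56 + 0.60 = 1.16   (lower = better match)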
+
+ # Evaluation Metrics
+ def calculate_statistics(results, search_time, vector_store, num_tokens, embedding_model, query, top_k):
+     stats = {
        "num_results": len(results),
+         # results is a list of Documents, so truth-test it directly rather than using .empty.
+         "avg_content_length": np.mean([len(doc.page_content) for doc in results]) if results else 0,
        "search_time": search_time,
        "vector_store_size": vector_store._index.ntotal if hasattr(vector_store, '_index') else "N/A",
        "num_documents": len(vector_store.docstore._dict),
        "num_tokens": num_tokens,
+         "embedding_vocab_size": embedding_model.client.get_vocab_size() if hasattr(embedding_model, 'client') and hasattr(embedding_model.client, 'get_vocab_size') else "N/A",
+         "embedding_dimension": len(embedding_model.embed_query(query)),
+         "top_k": top_k,
    }
+
+     # Diversity needs at least two results.
+     if len(results) > 1:
+         embeddings = [embedding_model.embed_query(doc.page_content) for doc in results]
+         pairwise_similarities = np.inner(embeddings, embeddings)
+         stats["result_diversity"] = 1 - np.mean(pairwise_similarities[np.triu_indices(len(embeddings), k=1)])
+
+         # A silhouette score needs a real cluster assignment; labelling every point
+         # as its own cluster is undefined, so it is reported as "N/A" here.
+         stats["silhouette_score"] = "N/A"
+     else:
+         stats["result_diversity"] = "N/A"
+         stats["silhouette_score"] = "N/A"
+
+     query_embedding = embedding_model.embed_query(query)
+     result_embeddings = [embedding_model.embed_query(doc.page_content) for doc in results]
+     # np.inner of two 1-D vectors is a scalar, so no indexing is needed.
+     similarities = [np.inner(query_embedding, emb) for emb in result_embeddings]
+     rank_correlation, _ = spearmanr(similarities, range(len(similarities)))
+     stats["rank_correlation"] = rank_correlation
+
+     return stats
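# A self-contained sketch of the diversity metric on three toy unit vectors:
# the mean pairwise inner product over the upper triangle, subtracted from 1,
# so near-orthogonal results score close to 1.
#
#   toy = np.array([[1.0, 0.0], [0.0, 1.0], [0.707, 0.707]])
#   sims = np.inner(toy, toy)
#   1 - np.mean(sims[np.triu_indices(len(toy), k=1)])   # -> 1 - mean(0, 0.707, 0.707) ≈ 0.53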
+
+ # Visualization
+ def visualize_results(results_df, stats_df):
+     fig, axs = plt.subplots(2, 2, figsize=(20, 20))
+
+     sns.barplot(x='model', y='search_time', data=stats_df, ax=axs[0, 0])
+     axs[0, 0].set_title('Search Time by Model')
+     axs[0, 0].set_xticklabels(axs[0, 0].get_xticklabels(), rotation=45, ha='right')
+
+     sns.scatterplot(x='result_diversity', y='rank_correlation', hue='model', data=stats_df, ax=axs[0, 1])
+     axs[0, 1].set_title('Result Diversity vs. Rank Correlation')
+
+     sns.boxplot(x='model', y='avg_content_length', data=stats_df, ax=axs[1, 0])
+     axs[1, 0].set_title('Distribution of Result Content Lengths')
+     axs[1, 0].set_xticklabels(axs[1, 0].get_xticklabels(), rotation=45, ha='right')
+
+     # embed_query returns plain Python lists, so coerce to arrays instead of
+     # filtering on np.ndarray (which would discard every row).
+     embeddings = np.array([np.asarray(e) for e in results_df['embedding'] if e is not None])
+     if len(embeddings) > 1:
+         # t-SNE requires perplexity < n_samples.
+         tsne = TSNE(n_components=2, random_state=42, perplexity=min(30, len(embeddings) - 1))
+         embeddings_2d = tsne.fit_transform(embeddings)
+
+         sns.scatterplot(x=embeddings_2d[:, 0], y=embeddings_2d[:, 1], hue=results_df['model'][:len(embeddings)], ax=axs[1, 1])
+         axs[1, 1].set_title('t-SNE Visualization of Result Embeddings')
+     else:
+         axs[1, 1].text(0.5, 0.5, "Not enough data for t-SNE visualization", ha='center', va='center')
+
+     plt.tight_layout()
+     return fig

+ # Main Comparison Function
+ def compare_embeddings(file, query, model_types, model_names, split_strategy, chunk_size, overlap_size, custom_separators, vector_store_type, search_type, top_k, lang='german', use_custom_embedding=False, optimize_vocab=False, phonetic_weight=0.3, custom_tokenizer_file=None):
    all_results = []
    all_stats = []
    settings = {

        "custom_separators": custom_separators,
        "vector_store_type": vector_store_type,
        "search_type": search_type,
+         "top_k": top_k,
+         "lang": lang,
+         "use_custom_embedding": use_custom_embedding,
+         "optimize_vocab": optimize_vocab,
+         "phonetic_weight": phonetic_weight
    }

    for model_type, model_name in zip(model_types, model_names):

            split_strategy,
            chunk_size,
            overlap_size,
+             custom_separators.split(',') if custom_separators else None,
+             lang,
+             custom_tokenizer_file
        )

+         if use_custom_embedding:
+             # create_custom_embedding and CustomEmbeddings do not appear in the
+             # hunks shown here; they are presumably defined elsewhere in app.py.
+             custom_model = create_custom_embedding(chunks)
+             embedding_model = CustomEmbeddings(custom_model)
+
+         if optimize_vocab:
+             # optimize_vocabulary is likewise defined outside the visible hunks.
+             tokenizer, optimized_chunks = optimize_vocabulary(chunks)
+             chunks = optimized_chunks
+
+         results, search_time, vector_store, results_raw = search_embeddings(
            chunks,
            embedding_model,
            vector_store_type,
            search_type,
            query,
+             top_k,
+             lang,
+             phonetic_weight
        )

+         stats = calculate_statistics(results_raw, search_time, vector_store, num_tokens, embedding_model, query, top_k)
        stats["model"] = f"{model_type} - {model_name}"
        stats.update(settings)

    results_df = pd.DataFrame(all_results)
    stats_df = pd.DataFrame(all_stats)

+     # Generate visualizations
+     fig = visualize_results(results_df, stats_df)
+
+     return results_df, stats_df, fig
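# A direct (non-Gradio) invocation sketch, mirroring the interface defaults below
# (the query is illustrative):
#
#   results_df, stats_df, fig = compare_embeddings(
#       None, "Wie funktioniert maschinelles Lernen?",
#       ["HuggingFace"], ["e5-base-de"],
#       "recursive", 500, 50, "", "FAISS", "similarity", 5)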

def format_results(results, stats):
    formatted_results = []
    for doc in results:
        result = {
            "Model": stats["model"],
+             "Content": doc.page_content,
+             "Embedding": doc.embedding if hasattr(doc, 'embedding') else None,
            **doc.metadata,
            **{k: v for k, v in stats.items() if k not in ["model"]}
        }
        formatted_results.append(result)
    return formatted_results

+ # Gradio Interface
+ def launch_interface(share=True):
+     iface = gr.Interface(
+         fn=compare_embeddings,
+         inputs=[
+             gr.File(label="Upload File (Optional)"),
+             gr.Textbox(label="Search Query"),
+             gr.CheckboxGroup(choices=list(model_manager.list_models().keys()) + ["Custom"], label="Embedding Model Types"),
+             gr.CheckboxGroup(choices=[model for models in model_manager.list_models().values() for model in models] + ["custom_model"], label="Embedding Models"),
+             gr.Radio(choices=["token", "recursive"], label="Split Strategy", value="recursive"),
+             gr.Slider(100, 1000, step=100, value=500, label="Chunk Size"),
+             gr.Slider(0, 100, step=10, value=50, label="Overlap Size"),
+             gr.Textbox(label="Custom Split Separators (comma-separated, optional)"),
+             gr.Radio(choices=["FAISS", "Chroma"], label="Vector Store Type", value="FAISS"),
+             gr.Radio(choices=["similarity", "mmr", "custom"], label="Search Type", value="similarity"),
+             gr.Slider(1, 10, step=1, value=5, label="Top K"),
+             gr.Dropdown(choices=["german", "english", "french"], label="Language", value="german"),
+             gr.Checkbox(label="Use Custom Embedding", value=False),
+             gr.Checkbox(label="Optimize Vocabulary", value=False),
+             gr.Slider(0, 1, step=0.1, value=0.3, label="Phonetic Matching Weight"),
+             gr.File(label="Custom Tokenizer File (Optional)")
+         ],
+         outputs=[
+             gr.Dataframe(label="Results", interactive=False),
+             gr.Dataframe(label="Statistics", interactive=False),
+             gr.Plot(label="Visualizations")
+         ],
+         title="Advanced Embedding Comparison Tool",
+         description="Compare different embedding models and retrieval strategies with advanced preprocessing and phonetic matching"
+     )
+
+     tutorial_md = """
+ # Advanced Embedding Comparison Tool Tutorial
+
+ This tool allows you to compare different embedding models and retrieval strategies for document search and similarity matching.
+
+ ## How to use:
+
+ 1. Upload a file (optional) or use the default files in the system.
+ 2. Enter a search query.
+ 3. Select one or more embedding model types and specific models.
+ 4. Choose a text splitting strategy and set chunk size and overlap.
+ 5. Select a vector store type and search type.
+ 6. Set the number of top results to retrieve.
+ 7. Choose the language of your documents.
+ 8. Optionally, use custom embeddings, optimize vocabulary, or adjust the phonetic matching weight.
+ 9. If you have a custom tokenizer, upload the file.
+
+ The tool will process your query and display results, statistics, and visualizations to help you compare the performance of different models and strategies.
+ """
+
+     iface = gr.TabbedInterface(
+         [iface, gr.Markdown(tutorial_md)],
+         ["Embedding Comparison", "Tutorial"]
+     )
+
+     iface.launch(share=share)
+
+ if __name__ == "__main__":
+     launch_interface()