Spaces: Running on Zero
Omartificial-Intelligence-Space committed
Commit • 61dd04e
1 Parent(s): 6f8d49a
update app.py

app.py CHANGED
@@ -2,7 +2,7 @@ import gradio as gr
 import spaces
 import torch
 import pandas as pd
-import
+import plotly.graph_objects as go
 from datasets import load_dataset
 from sentence_transformers import SentenceTransformer
 from sentence_transformers.evaluation import InformationRetrievalEvaluator, SequentialEvaluator
@@ -13,7 +13,7 @@ device = "cuda" if torch.cuda.is_available() else "cpu"
 zero = torch.Tensor([0]).to(device)
 print(f"Device being used: {zero.device}")

-@spaces.GPU
+@spaces.GPU
 def evaluate_model(model_id):
     model = SentenceTransformer(model_id, device=device)
     matryoshka_dimensions = [768, 512, 256, 128, 64]
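A note on the decorator above: on a ZeroGPU Space (this one reports "Running on Zero"), `@spaces.GPU` allocates a GPU only while the decorated function runs and releases it afterwards. If an evaluation needs more than the default time slot, the decorator also accepts a duration argument, e.g. `@spaces.GPU(duration=120)`; the bare form used here keeps the default.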
@@ -21,7 +21,7 @@ def evaluate_model(model_id):
     # Prepare datasets
     datasets_info = [
         {
-            "name": "
+            "name": "Financial",
             "dataset_id": "Omartificial-Intelligence-Space/Arabic-finanical-rag-embedding-dataset",
             "split": "train",
             "size": 7000,
@@ -29,7 +29,7 @@ def evaluate_model(model_id):
             "sample_size": 500
         },
         {
-            "name": "MLQA
+            "name": "MLQA",
             "dataset_id": "google/xtreme",
             "subset": "MLQA.ar.ar",
             "split": "validation",
@@ -38,7 +38,7 @@ def evaluate_model(model_id):
             "sample_size": 500
         },
         {
-            "name": "ARCD
+            "name": "ARCD",
             "dataset_id": "hsseinmz/arcd",
             "split": "train",
             "size": None,
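The hunks above only touch the dataset names; the evaluation loop that actually produces `score` sits between these hunks and is unchanged, so the diff does not show it. For readers following along, the usual sentence-transformers pattern for Matryoshka-style retrieval evaluation builds one `InformationRetrievalEvaluator` per truncation dimension and chains them with `SequentialEvaluator`. The sketch below illustrates that pattern only; the `queries`, `corpus`, and `relevant_docs` mappings are assumed to come from each dataset's question/context pairs, and the Space's actual code may differ.

```python
from sentence_transformers.evaluation import (
    InformationRetrievalEvaluator,
    SequentialEvaluator,
)
from sentence_transformers.util import cos_sim

def build_matryoshka_evaluator(queries, corpus, relevant_docs, dims):
    # One IR evaluator per Matryoshka dimension: embeddings are truncated
    # to `dim` before scoring, so each evaluator reports its own NDCG@10.
    evaluators = [
        InformationRetrievalEvaluator(
            queries=queries,              # {query_id: question text}
            corpus=corpus,                # {doc_id: context text}
            relevant_docs=relevant_docs,  # {query_id: {relevant doc_ids}}
            name=f"dim_{dim}",
            truncate_dim=dim,
            score_functions={"cosine": cos_sim},
        )
        for dim in dims
    ]
    return SequentialEvaluator(evaluators)

# Hypothetical usage: results are keyed roughly as "dim_768_cosine_ndcg@10"
# (evaluator name + score function + metric); the exact key format depends
# on the installed sentence-transformers version.
# evaluator = build_matryoshka_evaluator(queries, corpus, relevant_docs, [768, 512, 256, 128, 64])
# results = evaluator(model)
```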
@@ -105,24 +105,32 @@ def evaluate_model(model_id):
             })
             scores.append(score)

-        # Store scores by dataset for
+        # Store scores by dataset for plot creation
         scores_by_dataset[dataset_info["name"]] = scores

     # Convert results to DataFrame for display
     result_df = pd.DataFrame(evaluation_results)

-    # Generate bar charts for each dataset
+    # Generate bar charts for each dataset using Plotly
     charts = []
-
+    color_scale = ['#003f5c', '#2f4b7c', '#665191', '#a05195', '#d45087']
+
     for dataset_name, scores in scores_by_dataset.items():
-        fig
-
-
-
-
-
-
-
+        fig = go.Figure()
+        fig.add_trace(go.Bar(
+            x=[str(dim) for dim in matryoshka_dimensions],
+            y=scores,
+            marker_color=color_scale,
+            text=[f"{score:.3f}" if score else "N/A" for score in scores],
+            textposition='auto'
+        ))
+
+        fig.update_layout(
+            title=f"{dataset_name} Evaluation",
+            xaxis_title="Embedding Dimension",
+            yaxis_title="NDCG@10 Score",
+            template="plotly_white"
+        )
         charts.append(fig)

     return result_df, charts[0], charts[1], charts[2]
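The next hunk wires the Gradio interface to a `display_results` wrapper whose body is unchanged and therefore not part of this diff. Assuming it simply forwards to `evaluate_model`, a minimal hypothetical sketch would be:

```python
def display_results(model_name):
    # Hypothetical wrapper: forward the model ID to the GPU-decorated
    # evaluation and return the DataFrame plus the three Plotly figures.
    result_df, financial_fig, mlqa_fig, arcd_fig = evaluate_model(model_name)
    return result_df, financial_fig, mlqa_fig, arcd_fig
```

Gradio's `gr.Plot` output component accepts Plotly figures directly, so the figures can be returned unchanged.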
@@ -134,32 +142,23 @@ def display_results(model_name):

 demo = gr.Interface(
     fn=display_results,
-    inputs=gr.Textbox(label="Enter
+    inputs=gr.Textbox(label="Enter a Hugging Face Model ID", placeholder="e.g., sentence-transformers/all-MiniLM-L6-v2"),
     outputs=[
         gr.Dataframe(label="Evaluation Results"),
-        gr.Plot(label="
-        gr.Plot(label="MLQA
-        gr.Plot(label="ARCD
+        gr.Plot(label="Financial Dataset"),
+        gr.Plot(label="MLQA Dataset"),
+        gr.Plot(label="ARCD Dataset")
     ],
-    title="
+    title="Arabic Embedding Evaluation",
     description=(
-        "Evaluate your Sentence Transformer model
-        "
-        "
-        "
-        "**Evaluation Metric:**\n"
-        "The evaluation uses **NDCG@10** (Normalized Discounted Cumulative Gain), which measures how well the retrieved documents (contexts) match the query relevance.\n"
-        "Higher scores indicate better performance. Embedding dimensions are reduced from 768 to 64, evaluating how well the model performs with fewer dimensions."
+        "Evaluate your Sentence Transformer model on **Arabic retrieval tasks** using Matryoshka embeddings. "
+        "Compare performance across financial, long-context, and short-context datasets.\n\n"
+        "The evaluation uses **NDCG@10** to measure how well the model retrieves relevant contexts. "
+        "Embedding dimensions are reduced from 768 to 64."
     ),
     theme="default",
     live=False,
     css="footer {visibility: hidden;}"
 )

-demo.launch(share=True)
-
-
-demo.launch(share=True)
-
-# Add the footer
-print("\nCreated by Omar Najar | Omartificial Intelligence Space")
+demo.launch(share=True)