Spaces:
Running
Running
Update app.py
Browse files
app.py
CHANGED
@@ -324,7 +324,7 @@ def classify_based_on_distance(train_data, train_labels, test_data):
|
|
324 |
|
325 |
return torch.tensor(predictions) # Return predictions as a PyTorch tensor
|
326 |
|
327 |
-
def plot_confusion_matrix(y_true, y_pred, title):
|
328 |
cm = confusion_matrix(y_true, y_pred)
|
329 |
|
330 |
# Calculate F1 Score
|
@@ -585,7 +585,7 @@ with gr.Blocks(css="""
|
|
585 |
<div style="background-color: #f0f0f0; padding: 15px; border-radius: 10px; color: #333;">
|
586 |
<h3 style="color: #0056b3;">📡 <b>LoS/NLoS Classification Task</b></h3>
|
587 |
<ul style="padding-left: 20px;">
|
588 |
-
<li><b>🎯 Goal</b>: Classify whether a channel is <b>LoS</b> (Line-of-Sight) or <b>NLoS</b> (Non-Line-of-Sight).</li>
|
589 |
<li><b>📊 Dataset</b>: Use the default dataset (a combination of six scenarios from the DeepMIMO dataset) or upload your own dataset in <b>h5py</b> format.</li>
|
590 |
<li><b>💡 Custom Dataset Requirements:</b>
|
591 |
<ul>
|
@@ -594,7 +594,7 @@ with gr.Blocks(css="""
|
|
594 |
</ul>
|
595 |
</li>
|
596 |
<li><b>📌 Tip</b>: You can find guidance on how to structure your dataset in the provided model repository.</li>
|
597 |
-
<li><b>💼 No Downstream Model</b>: Instead of a complex downstream model, we classify each sample based on its distance to the centroid of training samples from each class (LoS/NLoS).</li>
|
598 |
</ul>
|
599 |
</div>
|
600 |
""")
|
|
|
324 |
|
325 |
return torch.tensor(predictions) # Return predictions as a PyTorch tensor
|
326 |
|
327 |
+
def plot_confusion_matrix(y_true, y_pred, title, light_mode=True):
|
328 |
cm = confusion_matrix(y_true, y_pred)
|
329 |
|
330 |
# Calculate F1 Score
|
|
|
585 |
<div style="background-color: #f0f0f0; padding: 15px; border-radius: 10px; color: #333;">
|
586 |
<h3 style="color: #0056b3;">📡 <b>LoS/NLoS Classification Task</b></h3>
|
587 |
<ul style="padding-left: 20px;">
|
588 |
+
<li><b>🎯 Goal</b>: Classify whether a channel is <b>LoS</b> (Line-of-Sight) or <b>NLoS</b> (Non-Line-of-Sight) with very small LWM CLS embeddings.</li>
|
589 |
<li><b>📊 Dataset</b>: Use the default dataset (a combination of six scenarios from the DeepMIMO dataset) or upload your own dataset in <b>h5py</b> format.</li>
|
590 |
<li><b>💡 Custom Dataset Requirements:</b>
|
591 |
<ul>
|
|
|
594 |
</ul>
|
595 |
</li>
|
596 |
<li><b>📌 Tip</b>: You can find guidance on how to structure your dataset in the provided model repository.</li>
|
597 |
+
<li><b>💼 No Downstream Model</b>: Instead of a complex downstream model, we classify each sample based on its distance to the centroid of training samples from each class (LoS/NLoS).</li>
|
598 |
</ul>
|
599 |
</div>
|
600 |
""")
|