Spaces:
Running
Running
Sadjad Alikhani
committed on
Update app.py
Browse files
app.py
CHANGED
@@ -466,6 +466,14 @@ with gr.Blocks(css="""
|
|
466 |
margin-bottom: 20px;
|
467 |
}
|
468 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
469 |
""") as demo:
|
470 |
|
471 |
# Contact Section
|
@@ -479,6 +487,13 @@ with gr.Blocks(css="""
|
|
479 |
</a>
|
480 |
</div>
|
481 |
""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
482 |
|
483 |
# Tab for Beam Prediction Task
|
484 |
with gr.Tab("Beam Prediction Task"):
|
@@ -487,9 +502,25 @@ with gr.Blocks(css="""
|
|
487 |
# Explanation section with creative spacing and minimal design
|
488 |
gr.Markdown("""
|
489 |
<div class="explanation-box">
|
490 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
491 |
</div>
|
492 |
""")
|
|
|
|
|
|
|
|
|
|
|
493 |
|
494 |
with gr.Row():
|
495 |
with gr.Column():
|
@@ -511,9 +542,26 @@ with gr.Blocks(css="""
|
|
511 |
# Explanation section with creative spacing
|
512 |
gr.Markdown("""
|
513 |
<div class="explanation-box">
|
514 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
515 |
</div>
|
516 |
""")
|
|
|
|
|
|
|
|
|
|
|
517 |
|
518 |
# Radio button for user choice: predefined data or upload dataset
|
519 |
choice_radio = gr.Radio(choices=["Use Default Dataset", "Upload Dataset"], label="Choose how to proceed", value="Use Default Dataset")
|
@@ -547,7 +595,6 @@ with gr.Blocks(css="""
|
|
547 |
percentage_slider_los.change(fn=handle_user_choice, inputs=[choice_radio, percentage_slider_los, file_input],
|
548 |
outputs=[raw_img_los, embeddings_img_los, output_textbox])
|
549 |
|
550 |
-
|
551 |
# Launch the app
|
552 |
if __name__ == "__main__":
|
553 |
demo.launch()
|
|
|
466 |
margin-bottom: 20px;
|
467 |
}
|
468 |
|
469 |
+
.bold-highlight {
|
470 |
+
font-weight: bold;
|
471 |
+
color: #2c3e50;
|
472 |
+
font-size: 18px;
|
473 |
+
text-align: center;
|
474 |
+
margin-bottom: 20px;
|
475 |
+
}
|
476 |
+
|
477 |
""") as demo:
|
478 |
|
479 |
# Contact Section
|
|
|
487 |
</a>
|
488 |
</div>
|
489 |
""")
|
490 |
+
|
491 |
+
gr.Markdown("""
|
492 |
+
<div class="bold-highlight">
|
493 |
+
π Explore the pre-trained **LWM Model** here:
|
494 |
+
<a target="_blank" href="https://huggingface.co/sadjadalikhani/lwm/tree/main">https://huggingface.co/sadjadalikhani/lwm/tree/main</a>
|
495 |
+
</div>
|
496 |
+
""")
|
497 |
|
498 |
# Tab for Beam Prediction Task
|
499 |
with gr.Tab("Beam Prediction Task"):
|
|
|
502 |
# Explanation section with creative spacing and minimal design
|
503 |
gr.Markdown("""
|
504 |
<div class="explanation-box">
|
505 |
+
<h3>📡 Beam Prediction Task</h3>
|
506 |
+
<ul>
|
507 |
+
<li>🎯 **Goal**: Predict the strongest **mmWave beam** from a predefined codebook using Sub-6 GHz channels.</li>
|
508 |
+
<li>⚙️ **Adjust Settings**: Use the sliders to control the training data percentage and task complexity (beam count) to explore model performance.</li>
|
509 |
+
<li>π§ **Inferences**:
|
510 |
+
<ul>
|
511 |
+
<li>π First, the LWM model extracts features.</li>
|
512 |
+
<li>🤖 Then, the downstream residual 1D-CNN model (500K parameters) makes beam predictions.</li>
|
513 |
+
</ul>
|
514 |
+
</li>
|
515 |
+
<li>🗺️ **Dataset**: A combination of six scenarios from the DeepMIMO dataset (excluded from LWM pre-training) highlights the model's strong generalization abilities.</li>
|
516 |
+
</ul>
|
517 |
</div>
|
518 |
""")
|
519 |
+
#gr.Markdown("""
|
520 |
+
#<div class="explanation-box">
|
521 |
+
# In this task, you'll predict the strongest mmWave beam from a predefined codebook based on Sub-6 GHz channels. Adjust the data percentage and task complexity to observe how LWM performs on different settings. These are just inferences on first the LWM model and then the trained downstream model for this task (A residual 1D-CNN model with 500K parameters). The dataset used for this task is a combination of six scenarios from the DeepMIMO dataset that were not included in the LWM pre-training, showing the generalization of our model.
|
522 |
+
#</div>
|
523 |
+
#""")
|
524 |
|
525 |
with gr.Row():
|
526 |
with gr.Column():
|
|
|
542 |
# Explanation section with creative spacing
|
543 |
gr.Markdown("""
|
544 |
<div class="explanation-box">
|
545 |
+
<h3>π LoS/NLoS Classification Task</h3>
|
546 |
+
<ul>
|
547 |
+
<li>🎯 **Goal**: Classify whether a channel is **LoS** (Line-of-Sight) or **NLoS** (Non-Line-of-Sight).</li>
|
548 |
+
<li>π **Dataset**: Use the default dataset (a combination of six scenarios from the DeepMIMO dataset) or upload your own dataset in **h5py** format.</li>
|
549 |
+
<li>💡 **Custom Dataset Requirements**:
|
550 |
+
<ul>
|
551 |
+
<li>🛠️ `channels` array: Shape (N,32,32)</li>
|
552 |
+
<li>🏷️ `labels` array: Binary LoS/NLoS values (1/0)</li>
|
553 |
+
</ul>
|
554 |
+
</li>
|
555 |
+
<li>π **Tip**: You can find guidance on how to structure your dataset in the provided model repository.</li>
|
556 |
+
<li>πΌ **No Downstream Model**: Instead of a complex downstream model, we classify each sample based on its distance to the centroid of training samples from each class (LoS/NLoS).</li>
|
557 |
+
</ul>
|
558 |
</div>
|
559 |
""")
|
560 |
+
#gr.Markdown("""
|
561 |
+
#<div class="explanation-box">
|
562 |
+
# Use this task to classify whether a channel is LoS (Line-of-Sight) or NLoS (Non-Line-of-Sight). You can either upload your own dataset or use the default dataset to explore how LWM embeddings compare to raw channels. The default dataset is a combination of six scenarios from the DeepMIMO dataset. Your custom dataset in h5py format should contain 'channels' array of size (N,32,32), and 'labels' representing LoS/NLoS channels with 1/0. You can find additional information on how to save your dataset in the h5py format in the above-mentioned model repository. The interesting thing about this task is that we do not train any downstream model for LoS/NLoS classification, but just use a simple approach that predicts the label for a test sample based on the distance to the centroid of training samples corresponding to each label.
|
563 |
+
#</div>
|
564 |
+
#""")
|
565 |
|
566 |
# Radio button for user choice: predefined data or upload dataset
|
567 |
choice_radio = gr.Radio(choices=["Use Default Dataset", "Upload Dataset"], label="Choose how to proceed", value="Use Default Dataset")
|
|
|
595 |
percentage_slider_los.change(fn=handle_user_choice, inputs=[choice_radio, percentage_slider_los, file_input],
|
596 |
outputs=[raw_img_los, embeddings_img_los, output_textbox])
|
597 |
|
|
|
598 |
# Launch the app
|
599 |
if __name__ == "__main__":
|
600 |
demo.launch()
|