wi-lab committed
Commit b19fbcc · verified · 1 Parent(s): cc312e8

Update app.py

Files changed (1): app.py (+29 -14)
app.py CHANGED
@@ -474,6 +474,17 @@ with gr.Blocks(css="""
     margin-bottom: 20px;
 }
 
+.console-output {
+    background-color: #ffffff; /* Ensure it's visible in light mode */
+    color: #000000; /* Text in dark color for better contrast */
+    padding: 10px;
+    border-radius: 5px;
+}
+
+.plot-title {
+    font-weight: bold;
+    color: #2c3e50;
+}
 """) as demo:
 
     # Contact Section
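The new `.console-output` and `.plot-title` rules only take effect on components that are tagged with those class names. A minimal stand-alone sketch of the pattern (not part of the commit), assuming a recent Gradio release where the hook is the `elem_classes` argument:

```python
import gradio as gr

# A class defined in the css string applies only to components
# that opt in via `elem_classes`.
css = """
.console-output {
    background-color: #ffffff;
    color: #000000;
    padding: 10px;
    border-radius: 5px;
}
"""

with gr.Blocks(css=css) as demo:
    # Attach the class so the selector above matches this component.
    gr.Textbox(label="Console Output", lines=10, elem_classes=["console-output"])

if __name__ == "__main__":
    demo.launch()
```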
@@ -490,14 +501,14 @@ with gr.Blocks(css="""
 
     gr.Markdown("""
     <div class="bold-highlight">
-        🚀 Explore the pre-trained **LWM Model** here:
+        🚀 Explore the pre-trained <b>LWM Model</b> here:
         <a target="_blank" href="https://huggingface.co/wi-lab/lwm">https://huggingface.co/wi-lab/lwm</a>
     </div>
     """)
 
     # Tab for Beam Prediction Task
     with gr.Tab("Beam Prediction Task"):
-        gr.Markdown("### Beam Prediction Task")
+        #gr.Markdown("### Beam Prediction Task")
 
         # Explanation section with creative spacing and minimal design
         gr.Markdown("""
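The first change in this hunk is more than cosmetic: markdown renderers generally do not process inline markup such as `**bold**` inside a raw HTML block like this `<div>`, so the asterisks were showing up literally, while HTML `<b>` tags render correctly. A toy demo of the difference (illustrative, not the app's code):

```python
import gradio as gr

with gr.Blocks() as demo:
    gr.Markdown("<div>**stays literal**</div>")    # markdown inside raw HTML is not parsed
    gr.Markdown("<div><b>renders bold</b></div>")  # HTML tags work as expected

if __name__ == "__main__":
    demo.launch()
```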
@@ -516,11 +527,6 @@ with gr.Blocks(css="""
         </ul>
         </div>
         """)
-        #gr.Markdown("""
-        #<div class="explanation-box">
-        #    In this task, you'll predict the strongest mmWave beam from a predefined codebook based on Sub-6 GHz channels. Adjust the data percentage and task complexity to observe how LWM performs on different settings. These are just inferences on first the LWM model and then the trained downstream model for this task (A residual 1D-CNN model with 500K parameters). The dataset used for this task is a combination of six scenarios from the DeepMIMO dataset that were not included in the LWM pre-training, showing the genralization of our model.
-        #</div>
-        #""")
 
         with gr.Row():
             with gr.Column():
@@ -535,9 +541,16 @@ with gr.Blocks(css="""
         data_percentage_slider.change(fn=beam_prediction_task, inputs=[data_percentage_slider, task_complexity_dropdown], outputs=[raw_img_bp, embeddings_img_bp])
         task_complexity_dropdown.change(fn=beam_prediction_task, inputs=[data_percentage_slider, task_complexity_dropdown], outputs=[raw_img_bp, embeddings_img_bp])
 
+        # Add a conclusion section at the bottom
+        gr.Markdown("""
+        <div class="explanation-box">
+            <b>Conclusions</b>: By adjusting the data percentage and task complexity, you can observe how well LWM generalizes to unseen scenarios and handles varying complexity in the beam prediction task.
+        </div>
+        """)
+
     # Separate Tab for LoS/NLoS Classification Task
     with gr.Tab("LoS/NLoS Classification Task"):
-        gr.Markdown("### LoS/NLoS Classification Task")
+        #gr.Markdown("### LoS/NLoS Classification Task")
 
         # Explanation section with creative spacing
         gr.Markdown("""
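The two `.change` bindings at the top of this hunk re-run the task whenever either control moves, passing the current values of both. A self-contained sketch of the same wiring; `beam_prediction_task` below is a stub standing in for the app's real plotting function, and the slider/dropdown ranges are illustrative:

```python
import gradio as gr

def beam_prediction_task(data_percentage, task_complexity):
    # Stub: the real app returns two rendered result images for
    # raw channels vs. LWM embeddings.
    msg = f"{data_percentage}% of data, complexity: {task_complexity}"
    return msg, msg

with gr.Blocks() as demo:
    slider = gr.Slider(10, 100, step=10, value=50, label="Data Percentage")
    dropdown = gr.Dropdown(["Easy", "Medium", "Hard"], value="Easy", label="Task Complexity")
    out_raw = gr.Textbox(label="Raw Channels")
    out_emb = gr.Textbox(label="Embeddings")

    # Either control re-triggers the task; both current values are passed in.
    slider.change(fn=beam_prediction_task, inputs=[slider, dropdown], outputs=[out_raw, out_emb])
    dropdown.change(fn=beam_prediction_task, inputs=[slider, dropdown], outputs=[out_raw, out_emb])

if __name__ == "__main__":
    demo.launch()
```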
@@ -557,11 +570,6 @@ with gr.Blocks(css="""
         </ul>
         </div>
         """)
-        #gr.Markdown("""
-        #<div class="explanation-box">
-        #    Use this task to classify whether a channel is LoS (Line-of-Sight) or NLoS (Non-Line-of-Sight). You can either upload your own dataset or use the default dataset to explore how LWM embeddings compare to raw channels. The default dataset is a combinbation of six scenarios from the DeepMIMO dataset. Your custom dataset in h5py format should contain 'channels' array of size (N,32,32), and 'labels' representing LoS/NLoS channels with 1/0. You can find additional information on how to save your dataset in the h5py format in the above-mentioned model repository. The interesting thing about this task is that we do not train any downstream model for LoS/NLoS classification, but just use a simple approach that predicts the label for a test sample based on the distance to the centroid of training samples corresponding to each label.
-        #</div>
-        #""")
 
         # Radio button for user choice: predefined data or upload dataset
         choice_radio = gr.Radio(choices=["Use Default Dataset", "Upload Dataset"], label="Choose how to proceed", value="Use Default Dataset")
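The removed comment above still documents two useful facts: the expected upload layout (an h5py file with a `channels` array of shape (N, 32, 32) and a 0/1 `labels` array) and the training-free centroid rule used for classification. A sketch of both, with illustrative file and variable names (random data stands in for real channels):

```python
import h5py
import numpy as np

# --- Saving a custom dataset in the expected layout ---
N = 200
channels = np.random.randn(N, 32, 32).astype(np.float32)  # placeholder data
labels = np.random.randint(0, 2, size=N)                   # 1 = LoS, 0 = NLoS
with h5py.File("my_dataset.h5", "w") as f:
    f.create_dataset("channels", data=channels)
    f.create_dataset("labels", data=labels)

# --- Training-free centroid classifier ---
def centroid_predict(train_x, train_y, test_x):
    """Label each test sample by its nearer class centroid."""
    flat = lambda x: x.reshape(len(x), -1)
    c_los = flat(train_x[train_y == 1]).mean(axis=0)
    c_nlos = flat(train_x[train_y == 0]).mean(axis=0)
    d_los = np.linalg.norm(flat(test_x) - c_los, axis=1)
    d_nlos = np.linalg.norm(flat(test_x) - c_nlos, axis=1)
    return (d_los < d_nlos).astype(int)  # 1 where the LoS centroid is closer

preds = centroid_predict(channels[:150], labels[:150], channels[150:])
```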
@@ -579,7 +587,7 @@ with gr.Blocks(css="""
         with gr.Row():
             raw_img_los = gr.Image(label="Raw Channels", type="pil", width=300, height=300)
             embeddings_img_los = gr.Image(label="Embeddings", type="pil", width=300, height=300)
-        output_textbox = gr.Textbox(label="Console Output", lines=10)
+        output_textbox = gr.Textbox(label="Console Output", lines=10, elem_classes=["console-output"])
 
         # Update the file uploader visibility based on user choice
         def toggle_file_input(choice):
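The hunk cuts off before the body of `toggle_file_input`. The usual Gradio idiom for this show/hide pattern is a `gr.update` that flips visibility; a sketch of the idiom, not necessarily the exact body in app.py:

```python
import gradio as gr

def toggle_file_input(choice):
    # Show the uploader only when the user opts to bring their own data.
    return gr.update(visible=(choice == "Upload Dataset"))

with gr.Blocks() as demo:
    choice_radio = gr.Radio(
        choices=["Use Default Dataset", "Upload Dataset"],
        label="Choose how to proceed",
        value="Use Default Dataset",
    )
    file_input = gr.File(label="Upload h5py Dataset", visible=False)
    choice_radio.change(fn=toggle_file_input, inputs=choice_radio, outputs=file_input)

if __name__ == "__main__":
    demo.launch()
```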
@@ -595,6 +603,13 @@ with gr.Blocks(css="""
         percentage_slider_los.change(fn=handle_user_choice, inputs=[choice_radio, percentage_slider_los, file_input],
                                      outputs=[raw_img_los, embeddings_img_los, output_textbox])
 
+        # Add a conclusion section at the bottom
+        gr.Markdown("""
+        <div class="explanation-box">
+            <b>Conclusions</b>: With this task, you can evaluate how well LWM embeddings perform on LoS/NLoS classification and compare them to raw channels in identifying these features.
+        </div>
+        """)
+
 # Launch the app
 if __name__ == "__main__":
     demo.launch()
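`handle_user_choice` itself is not shown in the diff, but its wiring fixes the signature: it receives the radio selection, the slider percentage, and the (possibly empty) file upload, and returns the two plots plus the console text. A hypothetical skeleton consistent with that wiring; `run_los_nlos_eval` is an assumed stand-in, not a function from app.py:

```python
def run_los_nlos_eval(choice, percentage, file_obj):
    # Stub for the app's real evaluation pipeline (assumed, not from app.py).
    return None, None, f"{choice}: evaluated with {percentage}% of training data."

def handle_user_choice(choice, percentage, file_obj):
    # Signature inferred from the event wiring above:
    # (radio choice, slider value, optional upload) -> (raw plot, embedding plot, console text)
    if choice == "Upload Dataset" and file_obj is None:
        return None, None, "Please upload an h5py file with 'channels' and 'labels'."
    return run_los_nlos_eval(choice, percentage, file_obj)
```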
 