import gradio as gr
import os
from PIL import Image
import numpy as np
# Directories for the pre-rendered result images. The sliders in the UI
# produce indices into the value lists below, which are combined into
# file names like "percentage_50_complexity_16.png" under these folders.
RAW_PATH = os.path.join("images", "raw")
EMBEDDINGS_PATH = os.path.join("images", "embeddings")
GENERATED_PATH = os.path.join("images", "generated")
# Allowed values for the two sliders; slider positions (0-4 and 0-1)
# index into these lists rather than carrying the values directly.
percentage_values = [10, 30, 50, 70, 100]
complexity_values = [16, 32]
# Load and display predefined images based on the user's slider selection.
def display_predefined_images(percentage_idx, complexity_idx):
    """Load the pre-rendered raw/embeddings image pair for a slider selection.

    Args:
        percentage_idx: Slider position (0-4) indexing into ``percentage_values``.
        complexity_idx: Slider position (0-1) indexing into ``complexity_values``.

    Returns:
        Tuple ``(raw_image, embeddings_image)`` of PIL images.

    Raises:
        FileNotFoundError: If either expected image asset is missing.
    """
    # Map the slider positions to the actual values. Gradio sliders may
    # deliver floats even with step=1, so cast defensively before indexing.
    percentage = percentage_values[int(percentage_idx)]
    complexity = complexity_values[int(complexity_idx)]
    # Build the expected asset paths for this (percentage, complexity) pair.
    raw_image_path = os.path.join(RAW_PATH, f"percentage_{percentage}_complexity_{complexity}.png")
    embeddings_image_path = os.path.join(EMBEDDINGS_PATH, f"percentage_{percentage}_complexity_{complexity}.png")
    # Fail with a clear message instead of an opaque PIL error when an
    # expected asset has not been generated/checked in.
    for path in (raw_image_path, embeddings_image_path):
        if not os.path.exists(path):
            raise FileNotFoundError(f"Predefined image not found: {path}")
    raw_image = Image.open(raw_image_path)
    embeddings_image = Image.open(embeddings_image_path)
    return raw_image, embeddings_image
import torch
from transformers import AutoModel # Assuming you use a transformer-like model in your LWM repo
import numpy as np
import importlib.util
import torch
import numpy as np
import importlib.util
import subprocess
import os
# Build the pre-trained LWM model from the cloned repository.
def load_custom_model():
    """Instantiate the LWM model and switch it to evaluation mode.

    Returns:
        The LWM model instance, ready for inference.
    """
    # Imported lazily: lwm_model.py only becomes importable after the
    # LWM repository has been cloned and made reachable for imports.
    from lwm_model import LWM

    lwm = LWM()
    lwm.eval()  # inference mode — no training-time behavior
    return lwm
# Process an uploaded .py file and perform inference using the custom model.
def process_python_file(uploaded_file, percentage_idx, complexity_idx):
    """Run LWM inference on a channel matrix defined in an uploaded .py file.

    Args:
        uploaded_file: Gradio file object; its ``.name`` is the path to a
            Python file that must define a module-level ``channel_matrix``.
        percentage_idx: Slider position, used only to name the output files.
        complexity_idx: Slider position, used only to name the output files.

    Returns:
        ``(raw_image, embeddings_image)`` as PIL images on success, or the
        error message duplicated into both outputs on failure (preserving
        the original best-effort error contract).
    """
    import sys

    try:
        # Resolve the output directory to an absolute path up front and make
        # sure it exists — the original code neither created it nor pinned
        # it before changing directories.
        generated_dir = os.path.abspath(GENERATED_PATH)
        os.makedirs(generated_dir, exist_ok=True)

        # Clone the model repository (model + tokenizer) once.
        model_repo_url = "https://huggingface.co/sadjadalikhani/LWM"
        model_repo_dir = os.path.abspath("./LWM")
        if not os.path.exists(model_repo_dir):
            print(f"Cloning model repository from {model_repo_url}...")
            subprocess.run(["git", "clone", model_repo_url, model_repo_dir], check=True)

        # Make the repo importable WITHOUT os.chdir(). The previous chdir
        # persisted across calls, so a second invocation looked for ./LWM
        # inside the repo itself and attempted to re-clone it.
        if model_repo_dir not in sys.path:
            sys.path.insert(0, model_repo_dir)

        # Step 1: load the pre-trained model.
        model = load_custom_model()

        # Step 2: import the tokenizer shipped with the repository.
        from input_preprocess import tokenizer

        # Step 3: dynamically import the uploaded .py file.
        # SECURITY: exec_module runs arbitrary code from the upload; only
        # expose this app to trusted users or sandbox the process.
        spec = importlib.util.spec_from_file_location("uploaded_module", uploaded_file.name)
        uploaded_module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(uploaded_module)

        # The uploaded file must define a variable called 'channel_matrix'.
        channel_matrix = uploaded_module.channel_matrix

        # Step 4: tokenize / preprocess the channel matrix.
        preprocessed_data = tokenizer(manual_data=channel_matrix, gen_raw=True)

        # Step 5: run inference without tracking gradients.
        with torch.no_grad():
            input_tensor = torch.tensor(preprocessed_data).unsqueeze(0)  # add batch dimension
            output = model(input_tensor)  # inference result (see placeholder note below)

        # Step 6: generate result images.
        # Placeholder visualizations — replace with real plots of `output`.
        generated_raw_img = np.random.rand(300, 300, 3) * 255
        generated_embeddings_img = np.random.rand(300, 300, 3) * 255

        # Save the generated images into the app-level output directory.
        generated_raw_image_path = os.path.join(generated_dir, f"generated_raw_{percentage_idx}_{complexity_idx}.png")
        generated_embeddings_image_path = os.path.join(generated_dir, f"generated_embeddings_{percentage_idx}_{complexity_idx}.png")
        Image.fromarray(generated_raw_img.astype(np.uint8)).save(generated_raw_image_path)
        Image.fromarray(generated_embeddings_img.astype(np.uint8)).save(generated_embeddings_image_path)

        # Reload and return the saved images for display.
        raw_image = Image.open(generated_raw_image_path)
        embeddings_image = Image.open(generated_embeddings_image_path)
        return raw_image, embeddings_image
    except Exception as e:
        # Best-effort error reporting: surface the message in both outputs.
        return str(e), str(e)
# Dispatch based on whether the user supplied a file.
def los_nlos_classification(file, percentage_idx, complexity_idx):
    """Return the result image pair for the LoS/NLoS tab.

    Runs inference on the uploaded file when one is present; otherwise
    falls back to the predefined images for the current slider selection.
    """
    if file is None:
        return display_predefined_images(percentage_idx, complexity_idx)
    return process_python_file(file, percentage_idx, complexity_idx)
# Define the Gradio interface.
# NOTE: the CSS below targets CLASS selectors (.vertical-slider,
# .slider-container), so components must use elem_classes — the original
# passed elem_id, which emits an HTML id (and duplicated the same id on
# several components), so the CSS never matched.
with gr.Blocks(css="""
    .vertical-slider input[type=range] {
        writing-mode: bt-lr; /* IE */
        -webkit-appearance: slider-vertical; /* WebKit */
        width: 8px;
        height: 200px;
    }
    .slider-container {
        display: inline-block;
        margin-right: 50px;
        text-align: center;
    }
""") as demo:
    # Contact section (badge links).
    gr.Markdown(
    """
    ## Contact
    <div style="display: flex; align-items: center;">
        <a target="_blank" href="mailto:info@wirelessmodel.com"><img src="https://img.shields.io/badge/email-info@wirelessmodel.com-blue.svg?logo=gmail " alt="Email"></a>
        <a target="_blank" href="https://telegram.me/wirelessmodel"><img src="https://img.shields.io/badge/telegram-@wirelessmodel-blue.svg?logo=telegram " alt="Telegram"></a>
    </div>
    """
    )
    # Tabs for Beam Prediction and LoS/NLoS Classification.
    with gr.Tab("Beam Prediction Task"):
        gr.Markdown("### Beam Prediction Task")
        # Sliders for percentage and complexity (positions index into the
        # module-level value lists).
        with gr.Row():
            with gr.Column(elem_classes="slider-container"):
                gr.Markdown("Percentage of Data for Training")
                percentage_slider_bp = gr.Slider(minimum=0, maximum=4, step=1, value=0, interactive=True, elem_classes="vertical-slider")
            with gr.Column(elem_classes="slider-container"):
                gr.Markdown("Task Complexity")
                complexity_slider_bp = gr.Slider(minimum=0, maximum=1, step=1, value=0, interactive=True, elem_classes="vertical-slider")
        # Image outputs, side by side at a reduced size.
        with gr.Row():
            raw_img_bp = gr.Image(label="Raw Channels", type="pil", width=300, height=300, interactive=False)
            embeddings_img_bp = gr.Image(label="Embeddings", type="pil", width=300, height=300, interactive=False)
        # Refresh the images whenever either slider changes.
        percentage_slider_bp.change(fn=display_predefined_images, inputs=[percentage_slider_bp, complexity_slider_bp], outputs=[raw_img_bp, embeddings_img_bp])
        complexity_slider_bp.change(fn=display_predefined_images, inputs=[percentage_slider_bp, complexity_slider_bp], outputs=[raw_img_bp, embeddings_img_bp])
    with gr.Tab("LoS/NLoS Classification Task"):
        gr.Markdown("### LoS/NLoS Classification Task")
        # File uploader: a .py file defining `channel_matrix` (see
        # process_python_file).
        file_input = gr.File(label="Upload .py File", file_types=[".py"])
        # Sliders for percentage and complexity.
        with gr.Row():
            with gr.Column(elem_classes="slider-container"):
                gr.Markdown("Percentage of Data for Training")
                percentage_slider_los = gr.Slider(minimum=0, maximum=4, step=1, value=0, interactive=True, elem_classes="vertical-slider")
            with gr.Column(elem_classes="slider-container"):
                gr.Markdown("Task Complexity")
                complexity_slider_los = gr.Slider(minimum=0, maximum=1, step=1, value=0, interactive=True, elem_classes="vertical-slider")
        # Image outputs, side by side at a reduced size.
        with gr.Row():
            raw_img_los = gr.Image(label="Raw Channels", type="pil", width=300, height=300, interactive=False)
            embeddings_img_los = gr.Image(label="Embeddings", type="pil", width=300, height=300, interactive=False)
        # Refresh on file upload or slider change.
        file_input.change(fn=los_nlos_classification, inputs=[file_input, percentage_slider_los, complexity_slider_los], outputs=[raw_img_los, embeddings_img_los])
        percentage_slider_los.change(fn=los_nlos_classification, inputs=[file_input, percentage_slider_los, complexity_slider_los], outputs=[raw_img_los, embeddings_img_los])
        complexity_slider_los.change(fn=los_nlos_classification, inputs=[file_input, percentage_slider_los, complexity_slider_los], outputs=[raw_img_los, embeddings_img_los])
# Launch the app only when run as a script.
if __name__ == "__main__":
    demo.launch()