Sadjad Alikhani committed
Commit 4e62ce0 · verified · 1 Parent(s): 641c40b

Update app.py

Files changed (1)
  1. app.py +35 -23
app.py CHANGED
@@ -30,7 +30,6 @@ def display_predefined_images(percentage_idx, complexity_idx):
     return raw_image, embeddings_image
 
 import torch
-from transformers import AutoModel  # Assuming you use a transformer-like model in your LWM repo
 import numpy as np
 import importlib.util
 
@@ -67,44 +66,57 @@ def process_python_file(uploaded_file, percentage_idx, complexity_idx):
             return f"Directory {model_repo_dir} does not exist."
 
         # Step 1: Load the custom model
-        model = load_custom_model()
+        from lwm_model import LWM
+        import torch
+        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        print(f"Loading the LWM model on {device}...")
+        model = LWM.from_pretrained(device=device)
 
         # Step 2: Import the tokenizer
         from input_preprocess import tokenizer
 
-        # Step 3: Load the uploaded .py file that contains the wireless channel matrix
-        # Import the Python file dynamically
-        spec = importlib.util.spec_from_file_location("uploaded_module", uploaded_file.name)
-        uploaded_module = importlib.util.module_from_spec(spec)
-        spec.loader.exec_module(uploaded_module)
+        ## Step 3: Load the uploaded .py file that contains the wireless channel matrix
+        ## Import the Python file dynamically
+        #spec = importlib.util.spec_from_file_location("uploaded_module", uploaded_file.name)
+        #uploaded_module = importlib.util.module_from_spec(spec)
+        #spec.loader.exec_module(uploaded_module)
 
-        # Assuming the uploaded file defines a variable called 'channel_matrix'
-        channel_matrix = uploaded_module.channel_matrix  # This should be defined in the uploaded file
+        ## Assuming the uploaded file defines a variable called 'dataset'
+        #manual_data = uploaded_module.dataset  # This should be defined in the uploaded file
+
+        import pickle
+        # Load the .p file containing the wireless channel matrix
+        with open(uploaded_file.name, 'rb') as f:
+            manual_data = pickle.load(f)
 
         # Step 4: Tokenize the data if needed (or perform any necessary preprocessing)
-        preprocessed_data = tokenizer(manual_data=channel_matrix, gen_raw=True)
+        preprocessed_chs = tokenizer(manual_data=manual_data)
 
         # Step 5: Perform inference on the channel matrix using the model
-        with torch.no_grad():
-            input_tensor = torch.tensor(preprocessed_data).unsqueeze(0)  # Add batch dimension
-            output = model(input_tensor)  # Perform inference
-
+        #with torch.no_grad():
+        #    input_tensor = torch.tensor(preprocessed_data).unsqueeze(0)  # Add batch dimension
+        #    output = model(input_tensor)  # Perform inference
+        from inference import lwm_inference, create_raw_dataset
+        output_emb = lwm_inference(preprocessed_chs, 'channel_emb', model)
+        output_raw = create_raw_dataset(preprocessed_chs, device)
+        print(output_emb.shape)
+        print(output_raw.shape)
         # Step 6: Generate new images based on the inference results
-        generated_raw_img = np.random.rand(300, 300, 3) * 255  # Placeholder: Replace with actual inference result
-        generated_embeddings_img = np.random.rand(300, 300, 3) * 255  # Placeholder: Replace with actual inference result
+        #generated_raw_img = np.random.rand(300, 300, 3) * 255  # Placeholder: Replace with actual inference result
+        #generated_embeddings_img = np.random.rand(300, 300, 3) * 255  # Placeholder: Replace with actual inference result
 
         # Save the generated images
-        generated_raw_image_path = os.path.join(GENERATED_PATH, f"generated_raw_{percentage_idx}_{complexity_idx}.png")
-        generated_embeddings_image_path = os.path.join(GENERATED_PATH, f"generated_embeddings_{percentage_idx}_{complexity_idx}.png")
+        #generated_raw_image_path = os.path.join(GENERATED_PATH, f"generated_raw_{percentage_idx}_{complexity_idx}.png")
+        #generated_embeddings_image_path = os.path.join(GENERATED_PATH, f"generated_embeddings_{percentage_idx}_{complexity_idx}.png")
 
-        Image.fromarray(generated_raw_img.astype(np.uint8)).save(generated_raw_image_path)
-        Image.fromarray(generated_embeddings_img.astype(np.uint8)).save(generated_embeddings_image_path)
+        #Image.fromarray(generated_raw_img.astype(np.uint8)).save(generated_raw_image_path)
+        #Image.fromarray(generated_embeddings_img.astype(np.uint8)).save(generated_embeddings_image_path)
 
         # Load the generated images
-        raw_image = Image.open(generated_raw_image_path)
-        embeddings_image = Image.open(generated_embeddings_image_path)
+        #raw_image = Image.open(generated_raw_image_path)
+        #embeddings_image = Image.open(generated_embeddings_image_path)
 
-        return raw_image, embeddings_image
+        return output_emb, output_raw
 
     except Exception as e:
         return str(e), str(e)
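
For context, the updated inference path in process_python_file amounts to the following standalone sketch. It assumes the lwm_model, input_preprocess, and inference modules from the LWM repository are importable, as they are in the app above; the file name channels.p is only an illustrative stand-in for the uploaded pickle file, and the printed shapes depend on the uploaded channel dataset.

# Minimal sketch of the new LWM inference flow, under the assumptions above.
import pickle

import torch

from lwm_model import LWM                       # LWM model class from the repo
from input_preprocess import tokenizer          # channel tokenizer from the repo
from inference import lwm_inference, create_raw_dataset

device = 'cuda' if torch.cuda.is_available() else 'cpu'
model = LWM.from_pretrained(device=device)      # load the pretrained LWM model

# 'channels.p' is a placeholder name for the uploaded pickled channel matrices
with open('channels.p', 'rb') as f:
    manual_data = pickle.load(f)

preprocessed_chs = tokenizer(manual_data=manual_data)               # tokenize the channels
output_emb = lwm_inference(preprocessed_chs, 'channel_emb', model)  # LWM channel embeddings
output_raw = create_raw_dataset(preprocessed_chs, device)           # raw (untokenized) channels

print(output_emb.shape, output_raw.shape)

Note that after this commit, process_python_file returns the embedding and raw tensors directly rather than the previously saved placeholder PNG images.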