Sadjad Alikhani committed
Commit 1012e18 · verified · 1 Parent(s): 7ee077b

Update app.py

Files changed (1)
  1. app.py +36 -17
app.py CHANGED
@@ -75,6 +75,13 @@ def load_custom_model():
 
 import importlib.util
 
+# Function to dynamically load a Python module from a given file path
+def load_module_from_path(module_name, file_path):
+    spec = importlib.util.spec_from_file_location(module_name, file_path)
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    return module
+
 # Function to process the uploaded .p file and perform inference using the custom model
 def process_p_file(uploaded_file, percentage_idx, complexity_idx):
     capture = PrintCapture()
@@ -89,7 +96,7 @@ def process_p_file(uploaded_file, percentage_idx, complexity_idx):
         print(f"Cloning model repository from {model_repo_url}...")
         subprocess.run(["git", "clone", model_repo_url, model_repo_dir], check=True)
 
-        # Step 2: Verify the repository was cloned
+        # Step 2: Verify the repository was cloned and change the working directory
         if os.path.exists(model_repo_dir):
             os.chdir(model_repo_dir)
             print(f"Changed working directory to {os.getcwd()}")
@@ -98,33 +105,43 @@ def process_p_file(uploaded_file, percentage_idx, complexity_idx):
             print(f"Directory {model_repo_dir} does not exist.")
             return
 
-        # Step 3: Dynamically import lwm_model.py using importlib
+        # Step 3: Dynamically load lwm_model.py, input_preprocess.py, and inference.py
         lwm_model_path = os.path.join(os.getcwd(), 'lwm_model.py')
-        if not os.path.exists(lwm_model_path):
-            print(f"Error: lwm_model.py not found at {lwm_model_path}")
+        input_preprocess_path = os.path.join(os.getcwd(), 'input_preprocess.py')
+        inference_path = os.path.join(os.getcwd(), 'inference.py')
+
+        # Load lwm_model
+        if os.path.exists(lwm_model_path):
+            lwm_model = load_module_from_path("lwm_model", lwm_model_path)
+        else:
             return f"Error: lwm_model.py not found at {lwm_model_path}"
 
-        # Use importlib to dynamically load lwm_model.py
-        spec = importlib.util.spec_from_file_location("lwm_model", lwm_model_path)
-        lwm_model = importlib.util.module_from_spec(spec)
-        spec.loader.exec_module(lwm_model)
-
-        # Step 4: Load the model from LWM module
+        # Load input_preprocess
+        if os.path.exists(input_preprocess_path):
+            input_preprocess = load_module_from_path("input_preprocess", input_preprocess_path)
+        else:
+            return f"Error: input_preprocess.py not found at {input_preprocess_path}"
+
+        # Load inference
+        if os.path.exists(inference_path):
+            inference = load_module_from_path("inference", inference_path)
+        else:
+            return f"Error: inference.py not found at {inference_path}"
+
+        # Step 4: Load the model from lwm_model module
         device = 'cpu'
         print(f"Loading the LWM model on {device}...")
         model = lwm_model.LWM.from_pretrained(device=device)
 
-        # Step 5: Import tokenizer and load data
-        from input_preprocess import tokenizer
+        # Step 5: Tokenize the data using the tokenizer from input_preprocess
         with open(uploaded_file.name, 'rb') as f:
             manual_data = pickle.load(f)
 
-        preprocessed_chs = tokenizer(manual_data=manual_data)
+        preprocessed_chs = input_preprocess.tokenizer(manual_data=manual_data)
 
-        # Step 6: Perform inference
-        from inference import lwm_inference, create_raw_dataset
-        output_emb = lwm_inference(preprocessed_chs, 'channel_emb', model)
-        output_raw = create_raw_dataset(preprocessed_chs, device)
+        # Step 6: Perform inference using the functions from inference.py
+        output_emb = inference.lwm_inference(preprocessed_chs, 'channel_emb', model)
+        output_raw = inference.create_raw_dataset(preprocessed_chs, device)
 
         print(f"Output Embeddings Shape: {output_emb.shape}")
         print(f"Output Raw Shape: {output_raw.shape}")
@@ -137,6 +154,8 @@ def process_p_file(uploaded_file, percentage_idx, complexity_idx):
     finally:
        sys.stdout = sys.__stdout__  # Reset print statements
 
+
+
 # Function to handle logic based on whether a file is uploaded or not
 def los_nlos_classification(file, percentage_idx, complexity_idx):
     if file is not None:
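
The commit replaces the previous mix of one inline importlib block plus ordinary `from ... import` statements with a single reusable `load_module_from_path` helper applied to all three repository files. The sketch below is a minimal illustration of the same `importlib.util` pattern; it uses a hypothetical example_module.py rather than any of the repository's real modules:

    import importlib.util
    import os

    def load_module_from_path(module_name, file_path):
        # Build a module spec from an explicit file path, create the module
        # object, execute its code, and return it (the pattern used in app.py)
        spec = importlib.util.spec_from_file_location(module_name, file_path)
        module = importlib.util.module_from_spec(spec)
        spec.loader.exec_module(module)
        return module

    # Hypothetical usage with a made-up file name, not one of the repo's modules
    module_path = os.path.join(os.getcwd(), "example_module.py")
    if os.path.exists(module_path):
        example_module = load_module_from_path("example_module", module_path)
        # Functions are then called through the returned module object,
        # mirroring how app.py now calls input_preprocess.tokenizer(...)
        # and inference.lwm_inference(...)
    else:
        print(f"Error: example_module.py not found at {module_path}")

One likely benefit of this approach is that loading from an explicit file path does not depend on the cloned directory being importable by name via sys.path, and keeping the modules namespaced (e.g. inference.create_raw_dataset) makes the origin of each helper explicit.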