Spanicin committed
Commit f52006b (verified)
1 Parent(s): a407248

Update app_parallel.py

Files changed (1)
  1. app_parallel.py +24 -19
app_parallel.py CHANGED
@@ -109,6 +109,8 @@ def process_chunk(audio_chunk, preprocessed_data, args):
     first_coeff_path = preprocessed_data["first_coeff_path"]
     crop_pic_path = preprocessed_data["crop_pic_path"]
     crop_info = preprocessed_data["crop_info"]
+    with open(crop_info_path, "rb") as f:
+        crop_info = pickle.load(f)
 
     print("first_coeff_path",first_coeff_path)
     print("crop_pic_path",crop_pic_path)
@@ -132,14 +134,14 @@ def process_chunk(audio_chunk, preprocessed_data, args):
     result, base64_video, temp_file_path, _ = animate_from_coeff.generate(data, args.result_dir, args.source_image, crop_info,
                                                enhancer=args.enhancer, background_enhancer=args.background_enhancer, preprocess=args.preprocess)
 
-    video_clip = mp.VideoFileClip(temp_file_path)
-    duration = video_clip.duration
+    # video_clip = mp.VideoFileClip(temp_file_path)
+    # duration = video_clip.duration
 
     app.config['temp_response'] = base64_video
     app.config['final_video_path'] = temp_file_path
-    app.config['final_video_duration'] = duration
+    # app.config['final_video_duration'] = duration
 
-    return base64_video, temp_file_path, duration
+    return base64_video, temp_file_path
 
 
 def create_temp_dir():
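Note: the moviepy duration lookup is commented out and duration is dropped from both app.config and the return value. If a caller still needs the clip length, it can be computed on demand. A minimal sketch with moviepy, assuming the same mp alias already used in app_parallel.py:

    import moviepy.editor as mp  # matches the mp alias used in the file

    def video_duration(path):
        # Open the finished clip only when the length is actually needed.
        clip = mp.VideoFileClip(path)
        try:
            return clip.duration  # length in seconds, as a float
        finally:
            clip.close()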
@@ -219,36 +221,39 @@ def generate_audio(voice_cloning, voice_gender, text_prompt):
 # Preprocessing step that runs only once
 def run_preprocessing(args):
     global path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting
-    first_frame_dir = os.path.join(args.result_dir, 'first_frame_dir')
-    os.makedirs(first_frame_dir, exist_ok=True)
-
-    # Check if preprocessed data already exists
-    fixed_temp_dir = 'tmp/preprocessed_data'
+    fixed_temp_dir = "/tmp/preprocess_data"
     os.makedirs(fixed_temp_dir, exist_ok=True)
     preprocessed_data_path = os.path.join(fixed_temp_dir, "preprocessed_data.pkl")
 
     if os.path.exists(preprocessed_data_path) and args.image_hardcoded == "yes":
+        print("Loading preprocessed data...")
         with open(preprocessed_data_path, "rb") as f:
            preprocessed_data = pickle.load(f)
         print("Loaded existing preprocessed data from:", preprocessed_data_path)
     else:
+        print("Running preprocessing...")
         preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, args.device)
         first_coeff_path, crop_pic_path, crop_info = preprocess_model.generate(args.source_image, first_frame_dir, args.preprocess, source_image_flag=True)
+        first_coeff_new_path = os.path.join(fixed_temp_dir, os.path.basename(first_coeff_path))
+        crop_pic_new_path = os.path.join(fixed_temp_dir, os.path.basename(crop_pic_path))
+        crop_info_new_path = os.path.join(fixed_temp_dir, "crop_info.pkl")
+        shutil.move(first_coeff_path, first_coeff_new_path)
+        shutil.move(crop_pic_path, crop_pic_new_path)
+
+        with open(crop_info_new_path, "wb") as f:
+            pickle.dump(crop_info, f)
+
+        preprocessed_data = {"first_coeff_path": first_coeff_new_path,
+                             "crop_pic_path": crop_pic_new_path,
+                             "crop_info": crop_info_new_path}
 
-
-        if not first_coeff_path:
-            raise Exception("Failed to get coefficients")
 
-        # Save the preprocessed data
-        preprocessed_data = {
-            "first_coeff_path": first_coeff_path,
-            "crop_pic_path": crop_pic_path,
-            "crop_info": crop_info
-        }
         with open(preprocessed_data_path, "wb") as f:
             pickle.dump(preprocessed_data, f)
+        print(f"Preprocessed data saved to: {preprocessed_data_path}")
 
     return preprocessed_data
-
+
 def split_audio(audio_path, chunk_duration=5):
     audio_clip = mp.AudioFileClip(audio_path)
     total_duration = audio_clip.duration
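Note: run_preprocessing now caches its one-off outputs under a fixed directory ("/tmp/preprocess_data"): the coefficient file and cropped picture are moved there, crop_info is pickled separately, and a small index dict of the new paths is saved as preprocessed_data.pkl so later runs with the hardcoded image skip CropAndExtract (this assumes shutil is imported elsewhere in the file). The pattern, reduced to a sketch; cache_dir and build_fn are placeholder names for illustration, not functions from the file:

    import os
    import pickle

    # Sketch of the cache-or-rebuild pattern this hunk introduces. build_fn stands in
    # for the CropAndExtract step plus the shutil.move/pickle bookkeeping in the diff.
    def load_or_build(cache_dir, build_fn):
        os.makedirs(cache_dir, exist_ok=True)
        index_path = os.path.join(cache_dir, "preprocessed_data.pkl")
        if os.path.exists(index_path):
            with open(index_path, "rb") as f:
                return pickle.load(f)          # reuse the cached artifact paths
        data = build_fn(cache_dir)             # expensive one-time preprocessing
        with open(index_path, "wb") as f:
            pickle.dump(data, f)               # remember the paths for next time
        return data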
@@ -271,7 +276,7 @@ def generate_chunks(audio_chunks, preprocessed_data, args):
     for future in as_completed(future_to_chunk):
         chunk = future_to_chunk[future] # Get the original chunk that was processed
         try:
-            base64_video, temp_file_path, duration = future.result() # Get the result of the completed task
+            base64_video, temp_file_path = future.result() # Get the result of the completed task
             yield f"Task for chunk {chunk} completed with video path: {temp_file_path}\n"
         except Exception as e:
             yield f"Task for chunk {chunk} failed: {e}\n"