fffiloni committed
Commit 2429a9d
1 Parent(s): 9aea3c7

Update app.py

Files changed (1):
  1. app.py +36 -41
app.py CHANGED
@@ -6,62 +6,57 @@ import datetime
 import sys
 
 def run_command(command):
-    """Run a shell command and print its output."""
-    print(f"Running command: {' '.join(command)}")
+    """Run a shell command and return its output and error status."""
+    print(f"Running command: {command}")
     try:
-        subprocess.check_call(command, shell=True)
+        result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
+        return True, result.stdout
     except subprocess.CalledProcessError as e:
-        print(f"Error running command {command}: {e}")
-        sys.exit(1)
+        return False, f"Error running command: {e}\nOutput: {e.output}\nError: {e.stderr}"
 
 def check_for_mp4_in_outputs(given_folder):
-    # Define the path to the outputs folder
     outputs_folder = given_folder
-
-    # Check if the outputs folder exists
     if not os.path.exists(outputs_folder):
         return None
-
-    # Check if there is a .mp4 file in the outputs folder
     mp4_files = [f for f in os.listdir(outputs_folder) if f.endswith('.mp4')]
-
-    # Return the path to the mp4 file if it exists
-    if mp4_files:
-        return os.path.join(outputs_folder, mp4_files[0])
-    else:
-        return None
-
+    return os.path.join(outputs_folder, mp4_files[0]) if mp4_files else None
 
 def infer(input_video, cropped_and_aligned):
-
-    torch.cuda.empty_cache()
-
-    filepath = input_video
-    # Get the current timestamp
-    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
-
-    output_folder_name = f"results_{timestamp}"
-
-    if cropped_and_aligned is True:
-        # Example: Run the inference script (replace with your actual command)
-        run_command(f"{sys.executable} inference_keep.py -i={filepath} -o={output_folder_name} --has_aligned --save_video -s=1")
-    else:
-        run_command(f"{sys.executable} inference_keep.py -i={filepath} -o={output_folder_name} --draw_box --save_video -s=1 --bg_upsampler=realesrgan")
-
-    torch.cuda.empty_cache()
-
-    # Call the function and print the result
-    this_infer_folder = os.path.splitext(os.path.basename(filepath))[0]
-    joined_path = os.path.join(output_folder_name, this_infer_folder)
-    mp4_file_path = check_for_mp4_in_outputs(joined_path)
-    print(mp4_file_path)
-
-    print(f"RESULT: {mp4_file_path}")
-
-    return mp4_file_path
+    try:
+        torch.cuda.empty_cache()
 
+        filepath = input_video
+        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
+        output_folder_name = f"results_{timestamp}"
+
+        if cropped_and_aligned:
+            command = f"{sys.executable} inference_keep.py -i={filepath} -o={output_folder_name} --has_aligned --save_video -s=1"
+        else:
+            command = f"{sys.executable} inference_keep.py -i={filepath} -o={output_folder_name} --draw_box --save_video -s=1 --bg_upsampler=realesrgan"
+
+        success, output = run_command(command)
+        if not success:
+            return None, output  # Return None for the video and the error message
+
+        torch.cuda.empty_cache()
+
+        this_infer_folder = os.path.splitext(os.path.basename(filepath))[0]
+        joined_path = os.path.join(output_folder_name, this_infer_folder)
+        mp4_file_path = check_for_mp4_in_outputs(joined_path)
+
+        if mp4_file_path:
+            print(f"RESULT: {mp4_file_path}")
+            return mp4_file_path, "Processing completed successfully."
+        else:
+            return None, "Processing completed, but no output video was found."
+
+    except Exception as e:
+        return None, f"An unexpected error occurred: {str(e)}"
+
+# Gradio interface setup
 result_video = gr.Video()
+error_output = gr.Textbox(label="Status/Error")
+
 with gr.Blocks() as demo:
     with gr.Column():
         gr.Markdown("# KEEP")
@@ -94,19 +89,19 @@ with gr.Blocks() as demo:
         ],
         fn = infer,
         inputs = [input_video, is_cropped_and_aligned],
-        outputs = [result_video],
+        outputs = [result_video, error_output],
         run_on_click = False,
         cache_examples = "lazy"
     )
 
     with gr.Column():
         result_video.render()
-
+        error_output.render()
 
     submit_btn.click(
        fn = infer,
        inputs = [input_video, is_cropped_and_aligned],
-       outputs = [result_video],
+       outputs = [result_video, error_output],
        show_api=False
     )
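
The core of this change is that run_command no longer calls sys.exit(1) on failure; success or failure is returned as data that infer forwards to the new error_output textbox. A minimal, stdlib-only sketch of that contract (the echo/exit commands below are placeholders standing in for inference_keep.py, not the repository's actual invocation):

import subprocess

def run_command(command):
    """Run a shell command and return (success, output) instead of exiting."""
    try:
        result = subprocess.run(command, shell=True, check=True, capture_output=True, text=True)
        return True, result.stdout
    except subprocess.CalledProcessError as e:
        return False, f"Error running command: {e}\nOutput: {e.output}\nError: {e.stderr}"

ok, out = run_command("echo hello")  # roughly (True, "hello\n")
ok, out = run_command("exit 1")      # (False, "Error running command: ...")

Because infer now returns a (video_path, status_message) pair, both Gradio wirings list two outputs, outputs = [result_video, error_output], so errors surface in the UI instead of killing the Space.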