Update app.py
app.py
CHANGED
@@ -13,7 +13,6 @@ import tempfile
 import uuid
 import time
 import threading
-from concurrent.futures import ThreadPoolExecutor, as_completed
 
 torch.set_float32_matmul_precision("medium")
 
@@ -61,33 +60,6 @@ def cleanup_temp_files():
 cleanup_thread = threading.Thread(target=cleanup_temp_files, daemon=True)
 cleanup_thread.start()
 
-def process(image, bg, fast_mode=False):
-    image_size = image.size
-    input_images = transform_image(image).unsqueeze(0).to(device)
-
-    # Select the model based on fast_mode
-    model = birefnet_lite if fast_mode else birefnet
-
-    # Prediction
-    with torch.no_grad():
-        preds = model(input_images)[-1].sigmoid().cpu()
-    pred = preds[0].squeeze()
-    pred_pil = transforms.ToPILImage()(pred)
-    mask = pred_pil.resize(image_size)
-
-    if isinstance(bg, str) and bg.startswith("#"):
-        color_rgb = tuple(int(bg[i:i+2], 16) for i in (1, 3, 5))
-        background = Image.new("RGBA", image_size, color_rgb + (255,))
-    elif isinstance(bg, Image.Image):
-        background = bg.convert("RGBA").resize(image_size)
-    else:
-        background = Image.open(bg).convert("RGBA").resize(image_size)
-
-    # Composite the image onto the background using the mask
-    image = Image.composite(image, background, mask)
-
-    return image
-
 
 @spaces.GPU
 def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=0, video_handling="slow_down", fast_mode=True):
@@ -105,11 +77,11 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=
         audio = video.audio
 
         # Extract frames at the specified FPS
-        frames =
+        frames = video.iter_frames(fps=fps)
 
-        # Process
+        # Process each frame for background removal
        processed_frames = []
-        yield gr.update(visible=True), gr.update(visible=False), "Processing started... Elapsed time: 0 seconds"
+        yield gr.update(visible=True), gr.update(visible=False), f"Processing started... Elapsed time: 0 seconds"
 
        if bg_type == "Video":
            background_video = mp.VideoFileClip(bg_video)
@@ -124,8 +96,7 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=
 
        bg_frame_index = 0 # Initialize background frame index
 
-
-        def process_single_frame(i, frame):
+        for i, frame in enumerate(frames):
            pil_image = Image.fromarray(frame)
            if bg_type == "Color":
                processed_image = process(pil_image, color, fast_mode)
@@ -134,41 +105,23 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=
            elif bg_type == "Video":
                if video_handling == "slow_down":
                    background_frame = background_frames[bg_frame_index % len(background_frames)]
+                    bg_frame_index += 1
+                    background_image = Image.fromarray(background_frame)
+                    processed_image = process(pil_image, background_image, fast_mode)
                else: # video_handling == "loop"
                    background_frame = background_frames[bg_frame_index % len(background_frames)]
-
-
-
-                    processed_image = process(pil_image, background_image, fast_mode)
+                    bg_frame_index += 1
+                    background_image = Image.fromarray(background_frame)
+                    processed_image = process(pil_image, background_image, fast_mode)
            else:
                processed_image = pil_image # Default to original image if no background is selected
-
-
-
-
-        future_to_index = {executor.submit(process_single_frame, i, frame): i for i, frame in enumerate(frames)}
-
-        # As each future completes, process the result
-        for future in as_completed(future_to_index):
-            i, processed_image = future.result()
-            processed_frames.append((i, processed_image))
-
-            # Update elapsed time
-            elapsed_time = time.time() - start_time
-            # Sort the processed_frames based on index to maintain order
-            processed_frames_sorted = sorted(processed_frames, key=lambda x: x[0])
-
-            # Yield the first processed image if it's available
-            if len(processed_frames_sorted) == 1:
-                first_image = Image.fromarray(processed_frames_sorted[0][1])
-                yield first_image, None, f"Processing frame {processed_frames_sorted[0][0]+1}... Elapsed time: {elapsed_time:.2f} seconds"
-
-        # Sort all processed frames
-        processed_frames_sorted = sorted(processed_frames, key=lambda x: x[0])
-        final_frames = [frame for i, frame in processed_frames_sorted]
+
+            processed_frames.append(np.array(processed_image))
+            elapsed_time = time.time() - start_time
+            yield processed_image, None, f"Processing frame {i+1}... Elapsed time: {elapsed_time:.2f} seconds"
 
        # Create a new video from the processed frames
-        processed_video = mp.ImageSequenceClip(
+        processed_video = mp.ImageSequenceClip(processed_frames, fps=fps)
 
        # Add the original audio back to the processed video
        processed_video = processed_video.set_audio(audio)
@@ -183,12 +136,42 @@ def fn(vid, bg_type="Color", bg_image=None, bg_video=None, color="#00FF00", fps=
        elapsed_time = time.time() - start_time
        yield gr.update(visible=False), gr.update(visible=True), f"Processing complete! Elapsed time: {elapsed_time:.2f} seconds"
        # Return the path to the temporary file
-        yield
+        yield processed_image, temp_filepath, f"Processing complete! Elapsed time: {elapsed_time:.2f} seconds"
 
    except Exception as e:
        print(f"Error: {e}")
        elapsed_time = time.time() - start_time
        yield gr.update(visible=False), gr.update(visible=True), f"Error processing video: {e}. Elapsed time: {elapsed_time:.2f} seconds"
+        yield None, f"Error processing video: {e}", f"Error processing video: {e}. Elapsed time: {elapsed_time:.2f} seconds"
+
+
+def process(image, bg, fast_mode=False):
+    image_size = image.size
+    input_images = transform_image(image).unsqueeze(0).to("cuda")
+
+    # Select the model based on fast_mode
+    model = birefnet_lite if fast_mode else birefnet
+
+    # Prediction
+    with torch.no_grad():
+        preds = model(input_images)[-1].sigmoid().cpu()
+    pred = preds[0].squeeze()
+    pred_pil = transforms.ToPILImage()(pred)
+    mask = pred_pil.resize(image_size)
+
+    if isinstance(bg, str) and bg.startswith("#"):
+        color_rgb = tuple(int(bg[i:i+2], 16) for i in (1, 3, 5))
+        background = Image.new("RGBA", image_size, color_rgb + (255,))
+    elif isinstance(bg, Image.Image):
+        background = bg.convert("RGBA").resize(image_size)
+    else:
+        background = Image.open(bg).convert("RGBA").resize(image_size)
+
+    # Composite the image onto the background using the mask
+    image = Image.composite(image, background, mask)
+
+    return image
+
 
 with gr.Blocks(theme=gr.themes.Ocean()) as demo:
    gr.Markdown("# Video Background Remover & Changer\n### You can replace image background with any color, image or video.\nNOTE: As this Space is running on ZERO GPU it has limit. It can handle approx 200frmaes at once. So, if you have big video than use small chunks or Duplicate this space.")
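For readers following the change: the new fn() drops the ThreadPoolExecutor and walks the frames sequentially, yielding a preview after each one before reassembling the clip. Below is a minimal standalone sketch of that pipeline, assuming moviepy 1.x (moviepy.editor) and using a hypothetical replace_background() as a stand-in for the BiRefNet-based process() in app.py.

import numpy as np
import moviepy.editor as mp
from PIL import Image


def replace_background(frame_pil):
    # Stand-in for app.py's process(): a real version would predict a mask with
    # BiRefNet and composite the frame onto the chosen background.
    return frame_pil


def change_video_background(in_path, out_path, fps=24):
    video = mp.VideoFileClip(in_path)
    audio = video.audio

    # Pull frames at the requested FPS and process them one at a time
    processed_frames = []
    for frame in video.iter_frames(fps=fps):
        pil_image = Image.fromarray(frame)
        processed_frames.append(np.array(replace_background(pil_image)))

    # Rebuild the clip from the processed frames and restore the original audio
    out_clip = mp.ImageSequenceClip(processed_frames, fps=fps)
    out_clip = out_clip.set_audio(audio)
    out_clip.write_videofile(out_path, codec="libx264")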
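The compositing step inside process() itself is plain PIL: parse the "#RRGGBB" string into a solid RGBA background and paste the foreground through the predicted mask. A hedged restatement follows, with a dummy mask standing in for the model output; composite_on_color and the 64x64 test images are illustrative only.

from PIL import Image


def composite_on_color(foreground, mask, bg="#00FF00"):
    size = foreground.size
    # "#RRGGBB" -> (R, G, B): two hex digits at offsets 1, 3, 5
    color_rgb = tuple(int(bg[i:i + 2], 16) for i in (1, 3, 5))
    background = Image.new("RGBA", size, color_rgb + (255,))
    # Where the mask is white the foreground is kept; where black, the background shows
    return Image.composite(foreground, background, mask)


if __name__ == "__main__":
    fg = Image.new("RGBA", (64, 64), (255, 0, 0, 255))  # dummy red foreground
    m = Image.new("L", (64, 64), 0)                     # dummy all-black mask: keep nothing
    out = composite_on_color(fg, m)                     # -> solid green image
    print(out.size, out.mode)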