ahmedghani committed
Commit c92867b • 1 Parent(s): e4bf961

added more tools

README.md CHANGED
@@ -9,4 +9,12 @@ app_file: app.py
pinned: false
---

+ ```bash
+ conda create -n editing-tools python=3.9 -y
+ conda activate editing-tools
+ conda install -c "nvidia/label/cuda-11.7.0" cuda-toolkit cuda
+ pip install -r requirements.txt
+ python app.py
+ ```
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
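
Before launching, it is worth confirming that the pinned CUDA 11.7 build of PyTorch is actually usable (a minimal check; the expected values follow requirements.txt):

```python
# Sanity-check the environment created by the steps above.
import torch

print(torch.__version__)          # expect 1.13.1+cu117, per requirements.txt
print(torch.version.cuda)         # expect 11.7
print(torch.cuda.is_available())  # True if the CUDA toolkit install worked
```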
__init__.py ADDED
File without changes
app.py CHANGED
@@ -1,36 +1,10 @@
- import os
- import torch
import gradio as gr
- from video_watermark_remover import *
- from video_converter import *
- from image_converter import *
- from image_watermark_remover import *
- from typing import List
- from pydantic import BaseModel
- from lama_cleaner.server import main
+ from watermark_remover import convert_video_to_frames, remove_image_watermark, remove_video_watermark
+ from video_converter import convert_video
+ from image_converter import convert_image
+ from image_editing import edit_image
+ from image_inpainting import inpaint

- class FakeLamaArgs(BaseModel):
-     host: str = "0.0.0.0"
-     port: int = 5000
-     model: str = 'lama'
-     hf_access_token: str = ""
-     sd_disable_nsfw: bool = False
-     sd_cpu_textencoder: bool = True
-     sd_run_local: bool = False
-     sd_enable_xformers: bool = False
-     local_files_only: bool = False
-     cpu_offload: bool = False
-     device: str = "cuda" if torch.cuda.is_available() else "cpu"
-     gui: bool = False
-     gui_size: List[int] = [1000, 1000]
-     input: str = ''
-     disable_model_switch: bool = True
-     debug: bool = False
-     no_half: bool = False
-     disable_nsfw: bool = False
-     enable_xformers: bool = True if torch.cuda.is_available() else False
-     model_dir: str = None
-     output_dir: str = None

css = """
#remove_btn {
@@ -51,18 +25,27 @@ css = """
#convert_btn:hover {
    background: linear-gradient(#2bbbc3, #201d18);
}
+ #button {
+     background: linear-gradient(#201d18, #2bbbc3);
+     font-weight: bold;
+     font-size: 18px;
+     color: white;
+ }
+ #button:hover {
+     background: linear-gradient(#2bbbc3, #201d18);
+ }
footer {
    display: none !important;
}
"""

- demo = gr.Blocks(css=css, title="Video Watermark Remover")
+ demo = gr.Blocks(css=css, title="Editing Tools")
with demo:
    with gr.Tab("Image Converter"):
        gr.Markdown("""
        # <center>🖼️ Image Converter</center>
        """)
-         image_format = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'gif', 'webp', 'ico', 'heic', 'heiv', 'heif']
+         image_format = ['jpg', 'jpeg', 'png', 'bmp', 'tiff', 'gif', 'webp', 'ico']
        with gr.Row():
            with gr.Column():
                input_image = gr.File(label="Upload an Image")
@@ -87,6 +70,32 @@ with demo:
            output_image_clean = gr.Image(label="Output Image", interactive=True)

        image_remove_btn.click(remove_image_watermark, inputs=[input_image_watermark], outputs=[output_image_clean])
+
+     with gr.Tab("Image Editing"):
+         gr.Markdown("""
+         # <center>🖼️ Image Editing</center>
+         """)
+         input_editing_image = gr.Image(label="Upload an Image", type="pil", interactive=True)
+         image_editing_options = gr.Radio(["High Res", "Colorize", "Greyscale", "Remove Background"], label="Select Editing Option", interactive=True, value="High Res")
+         image_editing_btn = gr.Button("Submit", interactive=True, elem_id="button")
+         with gr.Row():
+             image_editing_output = gr.Image(label="Output Preview", interactive=False)
+             image_editing_file = gr.File(label="Download File", interactive=False)
+
+         image_editing_btn.click(edit_image, inputs=[input_editing_image, image_editing_options], outputs=[image_editing_output, image_editing_file])
+
+     with gr.Tab("Image Inpainting"):
+         gr.Markdown("""
+         # <center>🖼️ Image Inpainting</center>
+         """)
+         input_inpainting_image = gr.Image(label="Upload an Image", type="pil", interactive=True, tool="sketch")
+         input_inpainting_prompt = gr.Textbox(label="Prompt", interactive=True)
+         input_inpainting_btn = gr.Button("Submit", interactive=True, elem_id="button")
+         with gr.Row():
+             input_inpainting_output = gr.Image(label="Image Preview", interactive=False)
+             input_inpainting_file = gr.File(label="Download File", interactive=False)
+
+         input_inpainting_btn.click(inpaint, inputs=[input_inpainting_image, input_inpainting_prompt], outputs=[input_inpainting_output, input_inpainting_file])

    with gr.Tab("Video Converter"):
        gr.Markdown("""
@@ -118,7 +127,7 @@ with demo:

    with gr.Tab("Video Watermark Remover"):
        gr.Markdown("""
-         # <center>🎥 Video Watermark Remover (Slow)</center>
+         # <center>🎥 Video Watermark Remover</center>
        """)
        with gr.Row():
            with gr.Column():
@@ -136,14 +145,6 @@ with demo:
        with gr.Row():
            output_video = gr.File(label="Output Video", interactive=False)
        input_video.change(convert_video_to_frames, inputs=[input_video], outputs=[mask, remove_btn])
-         remove_btn.click(remove_watermark, inputs=[mask], outputs=[output_video, remove_btn])
-
-
- # Change the code according to the error
- import threading
-
- thread = threading.Thread(target=main, kwargs={'args': FakeLamaArgs()})
- thread.daemon = True
- thread.start()
+         remove_btn.click(remove_video_watermark, inputs=[mask], outputs=[output_video, remove_btn])

- demo.launch(show_api=False)
+ demo.launch(show_api=False, share=True)
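
A note on the sketch-based inputs: with `tool="sketch"`, the Gradio 3.x image component hands the click handler a dict holding the uploaded `"image"` and the drawn `"mask"` as PIL images, which is the shape `inpaint` and `remove_image_watermark` index into. A minimal sketch of that contract outside the UI (file names are hypothetical):

```python
from PIL import Image
from image_inpainting import inpaint

# Emulate the dict a gr.Image(tool="sketch") component passes to its handler.
inputs = {
    "image": Image.open("photo.png"),  # hypothetical source image
    "mask": Image.open("mask.png"),    # hypothetical mask; white marks the region to repaint
}
preview, saved_path = inpaint(inputs, "a clean brick wall")
```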
image_editing.py ADDED
@@ -0,0 +1,65 @@
+ import os
+ import sys
+ from pathlib import Path
+ import torch
+ from basicsr.archs.rrdbnet_arch import RRDBNet
+ from gfpgan import GFPGANer
+ from realesrgan import RealESRGANer
+ import numpy as np
+ import cv2
+ from PIL import Image
+ from rembg import remove
+
+ # DeOldify
+ os.system("hub install deoldify==1.2.0")
+ import paddlehub as hub
+ hub.server_check()
+ colorize_model = hub.Module(name='deoldify')
+
+ highres_model = RRDBNet(num_in_ch=3, num_out_ch=3, num_feat=64, num_block=23, num_grow_ch=32, scale=4)
+ bg_upsampler = RealESRGANer(
+     scale=4,
+     model_path='https://github.com/xinntao/Real-ESRGAN/releases/download/v0.1.0/RealESRGAN_x4plus.pth',
+     model=highres_model,
+     tile=400,
+     tile_pad=10,
+     pre_pad=0,
+     half=True
+ )
+
+ upsampler = GFPGANer(
+     model_path='https://github.com/TencentARC/GFPGAN/releases/download/v1.3.0/GFPGANv1.4.pth',
+     upscale=4,
+     arch='clean',
+     channel_multiplier=2,
+     bg_upsampler=bg_upsampler,
+     device="cuda" if torch.cuda.is_available() else "cpu",
+ )
+
+
+ os.makedirs("deoldify", exist_ok=True)
+ os.makedirs("gfpganOutput", exist_ok=True)
+ os.makedirs("greyscale", exist_ok=True)
+ os.makedirs("rembg", exist_ok=True)
+
+ def restore_image(image):
+     _, _, output = upsampler.enhance(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR), has_aligned=False, only_center_face=False, paste_back=True)
+     image = Image.fromarray(cv2.cvtColor(output, cv2.COLOR_BGR2RGB))
+     return image
+
+ def edit_image(image, option):
+     tools = ["High Res", "Colorize", "Greyscale", "Remove Background"]
+     if option == tools[0]:
+         restore_image(image).save("gfpganOutput/output.png")
+         return './gfpganOutput/output.png', './gfpganOutput/output.png'
+     elif option == tools[1]:
+         image.convert("L").save("deoldify/input.png", "PNG", quality=80, optimize=True)
+         colorize_model.predict("deoldify/input.png")
+         return './output/DeOldify/'+Path('deoldify/input.png').stem+".png", './output/DeOldify/'+Path('deoldify/input.png').stem+".png"
+
+     elif option == tools[2]:
+         image.convert('L').save("greyscale/output.png")
+         return './greyscale/output.png', './greyscale/output.png'
+     elif option == tools[3]:
+         remove(image).save("rembg/output.png")
+         return './rembg/output.png', './rembg/output.png'
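
`edit_image` follows the Gradio handler contract above: a PIL image plus one of the four option strings, returning the same output path twice (preview and download). A minimal driver, assuming the model downloads at import time succeeded (`portrait.png` is a placeholder):

```python
from PIL import Image
from image_editing import edit_image

img = Image.open("portrait.png")  # hypothetical input
# "High Res" routes through GFPGAN face restoration with Real-ESRGAN background upscaling.
preview_path, download_path = edit_image(img, "High Res")
```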
image_inpainting.py ADDED
@@ -0,0 +1,28 @@
+ import os
+ import torch
+ from PIL import Image
+ from diffusers import StableDiffusionInpaintPipeline
+ from diffusers import AutoencoderKL
+
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+
+ pipe = StableDiffusionInpaintPipeline.from_pretrained("stabilityai/stable-diffusion-2-inpainting",
+     torch_dtype=torch.float16,
+     revision="fp16",
+     vae=AutoencoderKL.from_pretrained(
+         "stabilityai/sd-vae-ft-mse",
+         torch_dtype=torch.float16
+     ).to(device)
+ ).to(device)
+ pipe.enable_xformers_memory_efficient_attention()
+
+ os.makedirs("inpainting_output", exist_ok=True)
+
+ def inpaint(inputs, prompt):
+     # Bind the sketch dict's image and mask, then snap each side down to a multiple of 64.
+     image, mask = inputs["image"], inputs["mask"]
+     image = image.resize((image.size[0] - image.size[0] % 64, image.size[1] - image.size[1] % 64), Image.ANTIALIAS)
+     mask = mask.resize((mask.size[0] - mask.size[0] % 64, mask.size[1] - mask.size[1] % 64), Image.ANTIALIAS)
+     output = pipe(prompt=prompt, image=image, mask_image=mask, guidance_scale=7.5, height=image.size[1], width=image.size[0])
+     output.images[0].save("inpainting_output/output.png")
+     return output.images[0], "inpainting_output/output.png"
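
The `% 64` arithmetic snaps each side down to the nearest multiple of 64 so the pipeline's latent-space downsampling divides evenly (64 is a conservative choice for Stable Diffusion 2). Illustrative numbers:

```python
def snap64(width: int, height: int):
    # Mirrors the resize in inpaint(): round each side down to a multiple of 64.
    return width - width % 64, height - height % 64

assert snap64(1023, 771) == (960, 704)
assert snap64(512, 512) == (512, 512)  # already-aligned sizes are unchanged
```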
image_variations.py ADDED
@@ -0,0 +1,62 @@
+ import os
+ import cv2
+ from PIL import Image
+ import numpy as np
+ from diffusers import AutoencoderKL
+ from diffusers import UniPCMultistepScheduler
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
+ import torch
+ from transformers import BlipProcessor, BlipForConditionalGeneration
+
+ device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+ # Blip for Image Captioning
+ processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-base")
+ model = BlipForConditionalGeneration.from_pretrained(
+     "Salesforce/blip-image-captioning-base",
+     torch_dtype=torch.float16).to(device)
+
+ # ControlNet for Image Variation Generation based on Canny Edge Detection
+ pipe = StableDiffusionControlNetPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-2-1-base",
+     controlnet=ControlNetModel.from_pretrained(
+         "thibaud/controlnet-sd21-canny-diffusers",
+         torch_dtype=torch.float16),
+     torch_dtype=torch.float16,
+     revision="fp16",
+     vae=AutoencoderKL.from_pretrained(
+         "stabilityai/sd-vae-ft-mse",
+         torch_dtype=torch.float16
+     ).to(device)
+ ).to(device)
+
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
+ pipe.enable_xformers_memory_efficient_attention()
+
+ def pre_process_image(image):
+     image = np.array(image)
+     low_threshold = 100
+     high_threshold = 200
+     image = cv2.Canny(image, low_threshold, high_threshold)
+     image = image[:, :, None]
+     image = np.concatenate([image, image, image], axis=2)
+     return Image.fromarray(image)
+
+ def image_variations(image, input_prompt):
+     canny_image = pre_process_image(image)
+     if input_prompt:
+         prompt = input_prompt
+     else:
+         inputs = processor(image, return_tensors="pt").to(device, torch.float16)
+         out = model.generate(**inputs)
+         prompt = processor.decode(out[0], skip_special_tokens=True)
+         print(f"Blip Captioning: {prompt}")
+
+     output_images = pipe(
+         [prompt]*4,
+         canny_image,
+         negative_prompt=["distorted, noisy, lowres, bad anatomy, worst quality, low quality, bad eyes, rough face, unclear face"] * 4,
+         num_inference_steps=25,
+     ).images
+
+     return output_images, canny_image
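
`image_variations` is prompt-optional: given an empty prompt, BLIP captions the upload and that caption conditions the ControlNet pipeline on the Canny edge map. A minimal call (the file name is a placeholder):

```python
from PIL import Image
from image_variations import image_variations

source = Image.open("room.jpg")  # hypothetical input
variations, canny_map = image_variations(source, "")  # four variations plus the edge map used
```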
image_watermark_remover.py DELETED
@@ -1,61 +0,0 @@
- import io
- import requests
- from PIL import Image
-
- def remove_image_watermark(input):
-     image = input["image"].convert("RGB")
-     mask = input["mask"].convert("RGB")
-     image_data = io.BytesIO()
-     image.save(image_data, format="JPEG")
-     image_data = image_data.getvalue()
-
-     mask_data = io.BytesIO()
-     mask.save(mask_data, format="JPEG")
-     mask_data = mask_data.getvalue()
-
-     # Prepare form data
-     form_data = {
-         'ldmSteps': 25,
-         'ldmSampler': 'plms',
-         'zitsWireframe': True,
-         'hdStrategy': 'Original',
-         'hdStrategyCropMargin': 196,
-         'hdStrategyCropTrigerSize': 1280,
-         'hdStrategyResizeLimit': 2048,
-         'prompt': '',
-         'negativePrompt': '',
-         'croperX': -24,
-         'croperY': -23,
-         'croperHeight': 512,
-         'croperWidth': 512,
-         'useCroper': False,
-         'sdMaskBlur': 5,
-         'sdStrength': 0.75,
-         'sdSteps': 50,
-         'sdGuidanceScale': 7.5,
-         'sdSampler': 'pndm',
-         'sdSeed': 42,
-         'sdMatchHistograms': False,
-         'sdScale': 1,
-         'cv2Radius': 5,
-         'cv2Flag': 'INPAINT_NS',
-         'paintByExampleSteps': 50,
-         'paintByExampleGuidanceScale': 7.5,
-         'paintByExampleSeed': 42,
-         'paintByExampleMaskBlur': 5,
-         'paintByExampleMatchHistograms': False,
-         'sizeLimit': 1024,
-     }
-
-     files_data = {
-         'image': ('image.jpg', image_data),
-         'mask': ('mask.jpg', mask_data),
-     }
-
-     response = requests.post('http://localhost:5000/inpaint', data=form_data, files=files_data)
-
-     if response.headers['Content-Type'] == 'image/jpeg' or response.headers['Content-Type'] == 'image/png':
-         image = Image.open(io.BytesIO(response.content))
-         return image
-     else:
-         print(f"Error processing Image: {response.text}")
requirements.txt CHANGED
@@ -1,8 +1,26 @@
+ --extra-index-url https://download.pytorch.org/whl/cu117
+ torch==1.13.1+cu117
+ torchvision==0.14.1+cu117
+ torchaudio==0.13.1
gradio==3.22.1
ffmpeg-python
moviepy
pydub
opencv-python
pyheif
- lama-cleaner==0.33.0
- xformers==0.0.16
+ lama-cleaner==0.37.1
+ xformers==0.0.16
+ basicsr
+ gfpgan
+ realesrgan
+ paddlepaddle-gpu
+ imageio==2.4.1
+ paddlehub
+ triton==2.0.0
+ git+https://github.com/muhammad-ahmed-ghani/rembg#egg=rembg[gpu]
+ transformers
+ ftfy
+ scipy
+ diffusers
+ python-dotenv
+ onnxruntime-gpu
video_converter.py CHANGED
@@ -47,6 +47,14 @@ class VideoConverter:
        except Exception as e:
            raise Exception(f"Error converting audio: {e}")

+     def convert_to_gif(self, output_file):
+         try:
+             self.video.write_gif(output_file)
+             print("Video converted to GIF successfully!")
+             return output_file
+         except Exception as e:
+             raise Exception(f"Error converting video to GIF: {e}")
+
def convert_video(input_file, format):
    try:
        converter = VideoConverter(input_file)
@@ -54,6 +62,8 @@ def convert_video(input_file, format):
            return converter.convert_video(f"output.{format}", format), "Converted video successfully!"
        elif format in ['mp3', 'wav', 'ogg', 'flac', 'aac']:
            return converter.convert_audio(f"output.{format}", format), "Converted audio successfully!"
+         elif format == "gif":
+             return converter.convert_to_gif(f"output.{format}"), "Converted to GIF successfully"
        else:
            return None, "Unsupported format!"
    except Exception as e:
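
The new `gif` branch routes through MoviePy's `write_gif` alongside the existing video and audio paths. Minimal usage via the module-level entry point (input path hypothetical):

```python
from video_converter import convert_video

# Dispatches to VideoConverter.convert_to_gif and writes output.gif.
output_path, status = convert_video("input.mp4", "gif")
print(status)  # "Converted to GIF successfully"
```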
video_watermark_remover.py → watermark_remover.py RENAMED
@@ -1,94 +1,109 @@
import glob
import os
- import io
- import ffmpeg
- import requests
from PIL import Image
import shutil
import concurrent.futures
import gradio as gr
import cv2
import re
+ import numpy as np
+ import torch
+ from lama_cleaner.helper import (
+     norm_img,
+     get_cache_path_by_url,
+     load_jit_model,
+ )
+ from lama_cleaner.model.base import InpaintModel
+ from lama_cleaner.schema import Config
+
+ LAMA_MODEL_URL = os.environ.get(
+     "LAMA_MODEL_URL",
+     "https://github.com/Sanster/models/releases/download/add_big_lama/big-lama.pt",
+ )
+ LAMA_MODEL_MD5 = os.environ.get("LAMA_MODEL_MD5", "e3aa4aaa15225a33ec84f9f4bc47e500")
+
+ class LaMa(InpaintModel):
+     name = "lama"
+     pad_mod = 8
+
+     def init_model(self, device, **kwargs):
+         self.model = load_jit_model(LAMA_MODEL_URL, device, LAMA_MODEL_MD5).eval()
+
+     @staticmethod
+     def is_downloaded() -> bool:
+         return os.path.exists(get_cache_path_by_url(LAMA_MODEL_URL))
+
+     def forward(self, image, mask, config: Config):
+         """Input image and output image have same size
+         image: [H, W, C] RGB
+         mask: [H, W]
+         return: BGR IMAGE
+         """
+         image = norm_img(image)
+         mask = norm_img(mask)
+
+         mask = (mask > 0) * 1
+         image = torch.from_numpy(image).unsqueeze(0).to(self.device)
+         mask = torch.from_numpy(mask).unsqueeze(0).to(self.device)
+
+         inpainted_image = self.model(image, mask)
+
+         cur_res = inpainted_image[0].permute(1, 2, 0).detach().cpu().numpy()
+         cur_res = np.clip(cur_res * 255, 0, 255).astype("uint8")
+         cur_res = cv2.cvtColor(cur_res, cv2.COLOR_RGB2BGR)
+         return cur_res
+
+ lama_model = LaMa("cuda" if torch.cuda.is_available() else "cpu")
+ config = Config(hd_strategy_crop_margin=196, ldm_steps=25, hd_strategy='Original', hd_strategy_crop_trigger_size=1280, hd_strategy_resize_limit=2048)
+
+ def remove_image_watermark(inputs):
+     alpha_channel = None
+     image, mask = inputs["image"], inputs["mask"]
+     if image.mode == "RGBA":
+         image = np.array(image)
+         alpha_channel = image[:, :, -1]
+         image = cv2.cvtColor(image, cv2.COLOR_RGBA2RGB)
+     else:
+         image = np.array(image)
+     mask = cv2.threshold(np.array(mask.convert("L")), 127, 255, cv2.THRESH_BINARY)[1]
+     output = lama_model(image, mask, config)
+     output = cv2.cvtColor(output.astype(np.uint8), cv2.COLOR_BGR2RGB)
+     if alpha_channel is not None:
+         if alpha_channel.shape[:2] != output.shape[:2]:
+             alpha_channel = cv2.resize(
+                 alpha_channel, dsize=(output.shape[1], output.shape[0])
+             )
+         output = np.concatenate(
+             (output, alpha_channel[:, :, np.newaxis]), axis=-1
+         )
+     return Image.fromarray(output)

def process_image(mask_data, image_path):
-     image = Image.open(image_path)
-     image_data = io.BytesIO()
-     image.save(image_data, format=image.format)
-     image_data = image_data.getvalue()
-
-     # Prepare form data
-     form_data = {
-         'ldmSteps': 25,
-         'ldmSampler': 'plms',
-         'zitsWireframe': True,
-         'hdStrategy': 'Original',
-         'hdStrategyCropMargin': 196,
-         'hdStrategyCropTrigerSize': 1280,
-         'hdStrategyResizeLimit': 2048,
-         'prompt': '',
-         'negativePrompt': '',
-         'croperX': -24,
-         'croperY': -23,
-         'croperHeight': 512,
-         'croperWidth': 512,
-         'useCroper': False,
-         'sdMaskBlur': 5,
-         'sdStrength': 0.75,
-         'sdSteps': 50,
-         'sdGuidanceScale': 7.5,
-         'sdSampler': 'pndm',
-         'sdSeed': 42,
-         'sdMatchHistograms': False,
-         'sdScale': 1,
-         'cv2Radius': 5,
-         'cv2Flag': 'INPAINT_NS',
-         'paintByExampleSteps': 50,
-         'paintByExampleGuidanceScale': 7.5,
-         'paintByExampleSeed': 42,
-         'paintByExampleMaskBlur': 5,
-         'paintByExampleMatchHistograms': False,
-         'sizeLimit': 1024,
-     }
-
-     files_data = {
-         'image': (os.path.basename(image_path), image_data),
-         'mask': ('mask.png', mask_data)
-     }
-
-     response = requests.post('http://localhost:5000/inpaint', data=form_data, files=files_data)
-
-     if response.headers['Content-Type'] == 'image/jpeg' or response.headers['Content-Type'] == 'image/png':
-         output_image_path = os.path.join('output_images', os.path.splitext(os.path.basename(image_path))[0] + '_inpainted' + os.path.splitext(image_path)[1])
-         with open(output_image_path, 'wb') as output_image_file:
-             output_image_file.write(response.content)
-     else:
-         print(f"Error processing {image_path}: {response.text}")
+     output = remove_image_watermark({"image": Image.open(image_path), "mask": mask_data})
+     output_image_path = os.path.join('output_images', os.path.splitext(os.path.basename(image_path))[0] + '_inpainted' + os.path.splitext(image_path)[1])
+     output.save(output_image_path)
+     return output_image_path

- def remove_watermark(sketch, images_path='frames', output_path='output_images'):
+ def remove_video_watermark(sketch, images_path='frames', output_path='output_images'):
    if os.path.exists('output_images'):
        shutil.rmtree('output_images')
    os.makedirs('output_images')

-     mask_data = io.BytesIO()
-     sketch["mask"].save(mask_data, format=sketch["mask"].format)
-     mask_data = mask_data.getvalue()
-
    image_paths = glob.glob(f'{images_path}/*.*')

-     with concurrent.futures.ThreadPoolExecutor() as executor:
-         executor.map(lambda image_path: process_image(mask_data, image_path), image_paths)
+     with concurrent.futures.ThreadPoolExecutor(max_workers=3) as executor:
+         executor.map(lambda image_path: process_image(sketch["mask"], image_path), image_paths)

    return gr.File.update(value=convert_frames_to_video('output_images'), visible=True), gr.Button.update(value='Done!')

def convert_video_to_frames(video):
    if os.path.exists('input_video.mp4'):
        os.remove('input_video.mp4')
-
+     # save the video to the current directory from the temporary file
    with open(video, 'rb') as f:
        with open('input_video.mp4', 'wb') as f2:
            f2.write(f.read())
-
-     #os.system(f"ffmpeg -i {video} input_video.mp4")
+     # os.system(f"ffmpeg -i {video} input_video.mp4")
    video_path = 'input_video.mp4'

    if os.path.exists('frames'):
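
The net effect of the rename: the per-frame HTTP round-trip to a local lama-cleaner server is replaced by an in-process TorchScript big-lama model, so frames are inpainted with a direct call. A minimal driver for the new entry point (paths hypothetical; the first call downloads big-lama.pt into the cache):

```python
from PIL import Image
from watermark_remover import remove_image_watermark

inputs = {
    "image": Image.open("frame_0001.png"),  # hypothetical extracted frame
    "mask": Image.open("mask.png"),         # white strokes cover the watermark
}
clean = remove_image_watermark(inputs)
clean.save("frame_0001_clean.png")
```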