ahmedghani commited on
Commit
50695f9
Β·
1 Parent(s): d687237

added convertor

Browse files
Files changed (4) hide show
  1. app.py +63 -126
  2. requirements.txt +3 -1
  3. video_convertor.py +57 -0
  4. video_watermark_remover.py +157 -0
app.py CHANGED
@@ -1,110 +1,7 @@
1
- import glob
2
- import os
3
- import io
4
- import ffmpeg
5
- import requests
6
- from PIL import Image
7
  import gradio as gr
8
- import shutil
9
- import concurrent.futures
10
 
11
- def process_image(mask_data, image_path):
12
- image = Image.open(image_path)
13
- image_data = io.BytesIO()
14
- image.save(image_data, format=image.format)
15
- image_data = image_data.getvalue()
16
-
17
- # Prepare form data
18
- form_data = {
19
- 'ldmSteps': 25,
20
- 'ldmSampler': 'plms',
21
- 'zitsWireframe': True,
22
- 'hdStrategy': 'Original',
23
- 'hdStrategyCropMargin': 196,
24
- 'hdStrategyCropTrigerSize': 1280,
25
- 'hdStrategyResizeLimit': 2048,
26
- 'prompt': '',
27
- 'negativePrompt': '',
28
- 'croperX': -24,
29
- 'croperY': -23,
30
- 'croperHeight': 512,
31
- 'croperWidth': 512,
32
- 'useCroper': False,
33
- 'sdMaskBlur': 5,
34
- 'sdStrength': 0.75,
35
- 'sdSteps': 50,
36
- 'sdGuidanceScale': 7.5,
37
- 'sdSampler': 'pndm',
38
- 'sdSeed': 42,
39
- 'sdMatchHistograms': False,
40
- 'sdScale': 1,
41
- 'cv2Radius': 5,
42
- 'cv2Flag': 'INPAINT_NS',
43
- 'paintByExampleSteps': 50,
44
- 'paintByExampleGuidanceScale': 7.5,
45
- 'paintByExampleSeed': 42,
46
- 'paintByExampleMaskBlur': 5,
47
- 'paintByExampleMatchHistograms': False,
48
- 'sizeLimit': 1024,
49
- }
50
-
51
- files_data = {
52
- 'image': (os.path.basename(image_path), image_data),
53
- 'mask': ('mask.png', mask_data)
54
- }
55
-
56
- response = requests.post('https://ahmedghani-lama-cleaner-lama.hf.space/inpaint', data=form_data, files=files_data)
57
-
58
- if response.headers['Content-Type'] == 'image/jpeg' or response.headers['Content-Type'] == 'image/png':
59
- output_image_path = os.path.join('output_images', os.path.splitext(os.path.basename(image_path))[0] + '_inpainted' + os.path.splitext(image_path)[1])
60
- with open(output_image_path, 'wb') as output_image_file:
61
- output_image_file.write(response.content)
62
- else:
63
- print(f"Error processing {image_path}: {response.text}")
64
-
65
- def remove_watermark(sketch, images_path='frames', output_path='output_images'):
66
- if os.path.exists('output_images'):
67
- shutil.rmtree('output_images')
68
- os.makedirs('output_images')
69
-
70
- mask_data = io.BytesIO()
71
- sketch["mask"].save(mask_data, format=sketch["mask"].format)
72
- mask_data = mask_data.getvalue()
73
-
74
- image_paths = glob.glob(f'{images_path}/*.*')
75
-
76
- with concurrent.futures.ThreadPoolExecutor() as executor:
77
- executor.map(lambda image_path: process_image(mask_data, image_path), image_paths)
78
-
79
- return gr.Video.update(value=convert_frames_to_video('output_images'), visible=True), gr.Button.update(value='Done!')
80
-
81
- def convert_video_to_frames(video):
82
- print(f" input video is : {video}")
83
- if os.path.exists('input_video.mp4'):
84
- os.remove('input_video.mp4')
85
-
86
- ffmpeg.input(video).output('input_video.mp4').run()
87
- video_path = 'input_video.mp4'
88
-
89
- if os.path.exists('frames'):
90
- shutil.rmtree('frames')
91
- os.makedirs('frames')
92
-
93
- video_name = os.path.splitext(os.path.basename(video_path))[0]
94
- ffmpeg.input(video_path).output(f'frames/{video_name}_%d.jpg', qscale=2).run()
95
- return gr.Image.update(value=f"{os.getcwd()}/frames/{video_name}_1.jpg", interactive=True), gr.Button.update(interactive=True)
96
-
97
- def convert_frames_to_video(frames_path):
98
- if os.path.exists('output_video.mp4'):
99
- os.remove('output_video.mp4')
100
-
101
- (
102
- ffmpeg
103
- .input(f'{frames_path}/*.jpg', pattern_type='glob', framerate=25)
104
- .output('output_video.mp4')
105
- .run()
106
- )
107
- return gr.Video.update(value='output_video.mp4', visible=True, interactive=True), gr.Button.update(interactive=False)
108
 
109
  css = """
110
  #remove_btn {
@@ -116,33 +13,73 @@ css = """
116
  #remove_btn:hover {
117
  background: linear-gradient(#2bbbc3, #201d18);
118
  }
 
 
 
 
 
 
 
 
 
119
  footer {
120
  display: none !important;
121
  }
122
  """
 
123
  demo = gr.Blocks(css=css, title="Video Watermark Remover")
124
  with demo:
125
- gr.Markdown("""
126
- # <center>πŸŽ₯ Video Watermark Remover</center>
127
- """)
128
- with gr.Row():
129
- with gr.Column():
130
- input_video = gr.Video(label="Upload a Video")
131
- with gr.Column():
132
- mask = gr.Image(label="Create a mask for the image", tool="sketch", type="pil", interactive=False)
133
- with gr.Row():
134
- with gr.Column():
135
- pass
136
- with gr.Column():
137
- remove_btn = gr.Button("Remove Watermark", interactive=False, elem_id="remove_btn")
138
- with gr.Column():
139
- pass
140
-
141
- output_video = gr.Video(label="Output Video", interactive=False)
142
- input_video.change(convert_video_to_frames, inputs=[input_video], outputs=[mask, remove_btn])
143
- remove_btn.click(remove_watermark, inputs=[mask], outputs=[output_video, remove_btn])
144
-
145
- #position:fixed;bottom:0;left:0;right:0;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
146
  gr.Markdown("""## <center style="margin:20px;">Developed by Muhammad Ahmed<img src="https://avatars.githubusercontent.com/u/63394104?v=4" style="height:50px;width:50px;border-radius:50%;margin:5px;"></img></center>
147
  """)
148
  demo.launch(show_api=False)
 
 
 
 
 
 
 
1
  import gradio as gr
2
+ from video_watermark_remover import *
3
+ from video_convertor import *
4
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
 
6
  css = """
7
  #remove_btn {
 
13
  #remove_btn:hover {
14
  background: linear-gradient(#2bbbc3, #201d18);
15
  }
16
+ #convert_btn {
17
+ background: linear-gradient(#201d18, #2bbbc3);
18
+ font-weight: bold;
19
+ font-size: 18px;
20
+ color:white;
21
+ }
22
+ #convert_btn:hover {
23
+ background: linear-gradient(#2bbbc3, #201d18);
24
+ }
25
  footer {
26
  display: none !important;
27
  }
28
  """
29
+
30
# Two-tab UI: watermark removal (frame extraction -> mask -> inpaint -> rebuild)
# and a simple video/audio format converter. Callbacks come from the star-imports
# of video_watermark_remover and video_convertor.
demo = gr.Blocks(css=css, title="Video Watermark Remover")
with demo:
    with gr.Tab("Video Watermark Remover"):
        gr.Markdown("""
        # <center>πŸŽ₯ Video Watermark Remover</center>
        """)
        with gr.Row():
            with gr.Column():
                input_video = gr.Video(label="Upload a Video")
            with gr.Column():
                # Mask canvas stays disabled until frames have been extracted.
                mask = gr.Image(label="Create a mask for the image", tool="sketch", type="pil", interactive=False)
        with gr.Row():
            with gr.Column():
                pass
            with gr.Column():
                remove_btn = gr.Button("Remove Watermark", interactive=False, elem_id="remove_btn")
            with gr.Column():
                pass

        output_video = gr.Video(label="Output Video", interactive=False)
        input_video.change(convert_video_to_frames, inputs=[input_video], outputs=[mask, remove_btn])
        remove_btn.click(remove_watermark, inputs=[mask], outputs=[output_video, remove_btn])

    with gr.Tab("Video Convertor"):
        gr.Markdown("""
        # <center>πŸŽ₯ Video Convertor</center>
        """)
        video_format = ['webm', 'wmv', 'mkv', 'mp4', 'avi', 'mpeg', 'vob', 'flv']
        audio_format = ['mp3', 'wav', 'ogg', 'flac', 'aac']
        with gr.Row():
            with gr.Column():
                # Distinct name: the original reused `input_video`/`output_video`,
                # silently shadowing the first tab's components.
                convert_input = gr.Video(label="Upload a Video")
            with gr.Column():
                with gr.Row():
                    # Bug fix: gradio 3.x components take `value=`, not the
                    # removed gradio-2 `default=` kwarg.
                    format_select = gr.Radio(["Video", "Audio"], label="Select Format", value="Video")
                with gr.Row():
                    # `target_format` avoids shadowing the builtin `format`.
                    target_format = gr.Radio(video_format, label="Select Format", interactive=False)
        with gr.Row():
            with gr.Column():
                pass
            with gr.Column():
                convert_btn = gr.Button("Convert Video", interactive=False, elem_id="convert_btn")
            with gr.Column():
                pass
        with gr.Row():
            with gr.Column():
                converted_video = gr.Video(label="Output Video", interactive=False)
            with gr.Column():
                converted_audio = gr.Audio(label="Output Audio", interactive=False)
        status = gr.Textbox(label="Status", interactive=False)
        # Swap the selectable extensions when the user toggles Video/Audio.
        format_select.change(lambda x: gr.Radio.update(choices=video_format if x == "Video" else audio_format, interactive=True), inputs=[format_select], outputs=[target_format])
        target_format.change(lambda x: gr.Button.update(interactive=True), None, outputs=[convert_btn])
        convert_btn.click(convert_video, inputs=[convert_input, target_format], outputs=[converted_audio, converted_video, status])
    gr.Markdown("""## <center style="margin:20px;">Developed by Muhammad Ahmed<img src="https://avatars.githubusercontent.com/u/63394104?v=4" style="height:50px;width:50px;border-radius:50%;margin:5px;"></img></center>
    """)
demo.launch(show_api=False)
requirements.txt CHANGED
@@ -1,2 +1,4 @@
1
  gradio==3.22.1
2
- ffmpeg-python
 
 
 
1
  gradio==3.22.1
2
+ ffmpeg-python
3
+ moviepy
4
+ pydub
video_convertor.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from moviepy.editor import *
3
+ from pydub import AudioSegment
4
+
5
class VideoConverter:
    """Wrap a video file (via moviepy) and convert it to other video/audio formats.

    Raises FileNotFoundError if the input does not exist, and Exception with a
    descriptive message if loading or converting fails.
    """

    # Container extension -> ffmpeg encoder name for moviepy's write_videofile.
    # Bug fix: the original passed the container name itself (e.g. 'mp4') as the
    # codec, which ffmpeg rejects — it needs a real encoder such as 'libx264'.
    VIDEO_CODECS = {
        'webm': 'libvpx',
        'wmv': 'wmv2',
        'mkv': 'libx264',
        'mp4': 'libx264',
        'avi': 'mpeg4',
        'mpeg': 'mpeg2video',
        'vob': 'mpeg2video',
        'flv': 'flv',
    }
    AUDIO_FORMATS = ('mp3', 'wav', 'ogg', 'flac', 'aac')

    def __init__(self, input_file):
        """Validate that input_file exists and eagerly load it."""
        self.input_file = input_file
        self.video = None
        self.audio = None

        if not os.path.exists(self.input_file):
            raise FileNotFoundError(f"File not found: {self.input_file}")

        self.load_video()

    def load_video(self):
        """Open the clip and keep a handle to its audio track (may be None)."""
        try:
            self.video = VideoFileClip(self.input_file)
            self.audio = self.video.audio
        except Exception as e:
            raise Exception(f"Error loading video: {e}")

    def convert_video(self, output_file, format):
        """Re-encode the clip into `output_file`; returns the output path."""
        if format not in self.VIDEO_CODECS:
            raise ValueError(f"Unsupported format: {format}")

        try:
            self.video.write_videofile(output_file, codec=self.VIDEO_CODECS[format.lower()])
            print(f"Video converted to {format} format successfully!")
            return output_file
        except Exception as e:
            raise Exception(f"Error converting video: {e}")

    def convert_audio(self, output_file, format):
        """Extract/convert the audio track into `output_file`; returns the path."""
        if format not in self.AUDIO_FORMATS:
            raise ValueError(f"Unsupported format: {format}")

        try:
            # Bug fix: moviepy audio clips have no `.codec` attribute, so the
            # original raised AttributeError. Let pydub/ffmpeg auto-detect the
            # source container instead.
            audio_segment = AudioSegment.from_file(self.input_file)
            audio_segment.export(output_file, format=format.lower())
            print(f"Audio converted to {format} format successfully!")
            return output_file
        except Exception as e:
            raise Exception(f"Error converting audio: {e}")
46
+
47
def convert_video(input_file, format):
    """Gradio callback: convert `input_file` to `format`.

    Returns a (audio_path, video_path, status_message) triple; exactly one of
    the first two is non-None on success, both are None on failure.
    """
    video_formats = ('webm', 'wmv', 'mkv', 'mp4', 'avi', 'mpeg', 'vob', 'flv')
    audio_formats = ('mp3', 'wav', 'ogg', 'flac', 'aac')
    try:
        converter = VideoConverter(input_file)
        if format in video_formats:
            result = converter.convert_video(f"output.{format}", format)
            return None, result, "Converted video successfully!"
        if format in audio_formats:
            result = converter.convert_audio(f"output.{format}", format)
            return converter.convert_audio(f"output.{format}", format) if False else result, None, "Converted audio successfully!"
        return None, None, "Unsupported format!"
    except Exception as e:
        # Surface any converter error as the status string for the UI.
        return None, None, str(e)
video_watermark_remover.py ADDED
@@ -0,0 +1,157 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import glob
2
+ import os
3
+ import io
4
+ import ffmpeg
5
+ import requests
6
+ from PIL import Image
7
+ import shutil
8
+ import concurrent.futures
9
+ import gradio as gr
10
+ import cv2
11
+ import re
12
+
13
def process_image(mask_data, image_path):
    """POST one frame plus the shared mask to the hosted lama-cleaner inpainting
    service and save the inpainted result under output_images/.

    mask_data is the raw PNG bytes of the user mask; image_path is the frame file.
    Errors from the service are printed, not raised.
    """
    frame = Image.open(image_path)
    frame_buffer = io.BytesIO()
    frame.save(frame_buffer, format=frame.format)
    frame_bytes = frame_buffer.getvalue()

    # Inpainting parameters expected by the lama-cleaner HTTP API.
    form_data = {
        'ldmSteps': 25,
        'ldmSampler': 'plms',
        'zitsWireframe': True,
        'hdStrategy': 'Original',
        'hdStrategyCropMargin': 196,
        'hdStrategyCropTrigerSize': 1280,
        'hdStrategyResizeLimit': 2048,
        'prompt': '',
        'negativePrompt': '',
        'croperX': -24,
        'croperY': -23,
        'croperHeight': 512,
        'croperWidth': 512,
        'useCroper': False,
        'sdMaskBlur': 5,
        'sdStrength': 0.75,
        'sdSteps': 50,
        'sdGuidanceScale': 7.5,
        'sdSampler': 'pndm',
        'sdSeed': 42,
        'sdMatchHistograms': False,
        'sdScale': 1,
        'cv2Radius': 5,
        'cv2Flag': 'INPAINT_NS',
        'paintByExampleSteps': 50,
        'paintByExampleGuidanceScale': 7.5,
        'paintByExampleSeed': 42,
        'paintByExampleMaskBlur': 5,
        'paintByExampleMatchHistograms': False,
        'sizeLimit': 1024,
    }

    files_data = {
        'image': (os.path.basename(image_path), frame_bytes),
        'mask': ('mask.png', mask_data),
    }

    response = requests.post('https://ahmedghani-lama-cleaner-lama.hf.space/inpaint', data=form_data, files=files_data)

    # The service answers with an image on success, a text error otherwise.
    if response.headers['Content-Type'] in ('image/jpeg', 'image/png'):
        stem = os.path.splitext(os.path.basename(image_path))[0]
        ext = os.path.splitext(image_path)[1]
        output_image_path = os.path.join('output_images', stem + '_inpainted' + ext)
        with open(output_image_path, 'wb') as output_image_file:
            output_image_file.write(response.content)
    else:
        print(f"Error processing {image_path}: {response.text}")
66
+
67
def remove_watermark(sketch, images_path='frames', output_path='output_images'):
    """Inpaint every extracted frame with the user-drawn mask, then rebuild the video.

    sketch is gradio's sketch dict containing a "mask" PIL image; images_path is
    the directory of extracted frames. output_path is currently unused because
    process_image() hardcodes 'output_images' — kept for interface compatibility.
    Returns gradio update tuples for (output video, button).
    """
    if os.path.exists('output_images'):
        shutil.rmtree('output_images')
    os.makedirs('output_images')

    mask_buffer = io.BytesIO()
    # Bug fix: gradio builds the mask image in memory, so its `.format` is
    # typically None and Image.save(buffer, format=None) raises. Encode it
    # explicitly as PNG — it is uploaded under the name mask.png anyway.
    sketch["mask"].save(mask_buffer, format='PNG')
    mask_data = mask_buffer.getvalue()

    image_paths = glob.glob(f'{images_path}/*.*')

    with concurrent.futures.ThreadPoolExecutor() as executor:
        executor.map(lambda image_path: process_image(mask_data, image_path), image_paths)

    # Bug fix: convert_frames_to_video() returns UI-update objects, not a file
    # path, so nesting its result as the Video value (as the original did) is
    # wrong. Call it for its side effect — it writes output_video.mp4 — and
    # reference that file directly.
    convert_frames_to_video('output_images')
    return gr.Video.update(value='output_video.mp4', visible=True), gr.Button.update(value='Done!')
82
+
83
+ # def convert_video_to_frames(video):
84
+ # print(f" input video is : {video}")
85
+ # if os.path.exists('input_video.mp4'):
86
+ # os.remove('input_video.mp4')
87
+
88
+ # ffmpeg.input(video).output('input_video.mp4').run()
89
+ # video_path = 'input_video.mp4'
90
+
91
+ # if os.path.exists('frames'):
92
+ # shutil.rmtree('frames')
93
+ # os.makedirs('frames')
94
+
95
+ # video_name = os.path.splitext(os.path.basename(video_path))[0]
96
+ # ffmpeg.input(video_path).output(f'frames/{video_name}_%d.jpg', qscale=2).run()
97
+ # return gr.Image.update(value=f"{os.getcwd()}/frames/{video_name}_1.jpg", interactive=True), gr.Button.update(interactive=True)
98
+
99
+ # def convert_frames_to_video(frames_path):
100
+ # if os.path.exists('output_video.mp4'):
101
+ # os.remove('output_video.mp4')
102
+
103
+ # (
104
+ # ffmpeg
105
+ # .input(f'{frames_path}/*.jpg', pattern_type='glob', framerate=25)
106
+ # .output('output_video.mp4')
107
+ # .run()
108
+ # )
109
+ # return gr.Video.update(value='output_video.mp4', visible=True, interactive=True), gr.Button.update(interactive=False)
110
+
111
+
112
def convert_video_to_frames(video):
    """Normalize the upload to input_video.mp4, dump every frame to frames/*.jpg,
    and return gradio updates enabling the mask editor and the Remove button.

    `video` is the path of the uploaded file (gradio temp file).
    """
    import subprocess  # local import; used only by this entry point

    if os.path.exists('input_video.mp4'):
        os.remove('input_video.mp4')

    # Bug fix: the original `os.system(f"ffmpeg -i {video} ...")` breaks on
    # paths containing spaces/quotes and is shell-injection-prone. Passing an
    # argument list avoids the shell entirely; check=True surfaces failures.
    subprocess.run(['ffmpeg', '-i', video, 'input_video.mp4'], check=True)
    video_path = 'input_video.mp4'

    if os.path.exists('frames'):
        shutil.rmtree('frames')
    os.makedirs('frames')

    video_name = os.path.splitext(os.path.basename(video_path))[0]
    vidcap = cv2.VideoCapture(video_path)
    success, image = vidcap.read()
    count = 1
    # Frames are numbered from 1 so the UI can preview frames/<name>_1.jpg.
    while success:
        cv2.imwrite(f"frames/{video_name}_{count}.jpg", image)
        success, image = vidcap.read()
        count += 1
    vidcap.release()  # release the capture handle (original leaked it)

    return gr.Image.update(value=f"{os.getcwd()}/frames/{video_name}_1.jpg", interactive=True), gr.Button.update(interactive=True)
133
+
134
def convert_frames_to_video(frames_path):
    """Assemble frames_path/*.jpg into output_video.mp4 at 25 fps.

    Frame files are expected to be named <video_name>_<counter>.jpg.
    Returns gradio updates for (video component, button).
    """
    if os.path.exists('output_video.mp4'):
        os.remove('output_video.mp4')

    filelist = glob.glob(f"{frames_path}/*.jpg")
    if not filelist:
        # Bug fix: with no frames the original crashed with an unbound `size`
        # (NameError). Bail out without producing a video instead.
        return gr.Video.update(visible=False), gr.Button.update(interactive=False)

    # Bug fix: the original sorted on the FIRST number in the basename, which is
    # a digit from the video's own name when it contains one (e.g. "clip2_*.jpg"
    # -> constant key, frames left unsorted). The frame counter is always the
    # LAST number in the name.
    def frame_number(path):
        return int(re.findall(r'\d+', os.path.basename(path))[-1])

    sorted_frames = sorted(filelist, key=frame_number)

    # Size the writer from the first frame, then stream frames one at a time
    # instead of holding the whole video in memory as the original did.
    first = cv2.imread(sorted_frames[0])
    height, width, _ = first.shape
    out = cv2.VideoWriter('output_video.mp4', cv2.VideoWriter_fourcc(*'mp4v'), 25, (width, height))

    for filename in sorted_frames:
        out.write(cv2.imread(filename))
    out.release()

    return gr.Video.update(value='output_video.mp4', visible=True, interactive=True), gr.Button.update(interactive=False)