spookyuser committed on
Commit
2224f3b
1 Parent(s): 77f594c

add output_dir

Browse files
Files changed (2) hide show
  1. animate.py +42 -28
  2. app.py +17 -7
animate.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
  import sys
 
3
 
4
  import cv2
5
  import mediapy
@@ -10,7 +11,6 @@ from image_tools.sizes import resize_and_crop
10
  from moviepy.editor import CompositeVideoClip
11
  from moviepy.editor import VideoFileClip as vfc
12
  from PIL import Image
13
- from pathlib import Path
14
 
15
 
16
  # get key positions at which frame needs to be generated
@@ -21,11 +21,11 @@ def list_of_positions(num_contours, num_frames=100):
21
  return positions
22
 
23
 
24
- def contourfinder(image1, image2, text=None, num_frames=100):
25
  # Create two blank pages to write into
26
  # I just hardcoded 1024*1024 as the size, ideally this should be np.shape(image1)
27
- blank=np.zeros(np.shape(image1), dtype="uint8")
28
- blank2=np.zeros(np.shape(image2), dtype="uint8")
29
  # Threshold and contours for image 1 and 2
30
  threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
31
  contours, hierarchies = cv2.findContours(
@@ -39,13 +39,22 @@ def contourfinder(image1, image2, text=None, num_frames=100):
39
 
40
  # Initialize three empty videos
41
  vid1 = cv2.VideoWriter(
42
- "vid1.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, threshold.shape
 
 
 
43
  )
44
  vid2 = cv2.VideoWriter(
45
- "vid2.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 24, threshold.shape
 
 
 
46
  )
47
  text_vid = cv2.VideoWriter(
48
- "text_vid.mp4", cv2.VideoWriter_fourcc(*"mp4v"), 10, threshold.shape
 
 
 
49
  )
50
 
51
  # Get positions
@@ -62,10 +71,9 @@ def contourfinder(image1, image2, text=None, num_frames=100):
62
  frames.append(blank)
63
  # Compile to video
64
  vid1.write(blank)
65
-
66
  vid1.release()
67
- full_dir_vid_1 = Path("vid1.mp4").resolve().as_posix()
68
- clip1 = vfc(full_dir_vid_1)
69
  positions = list_of_positions((len(contours2)))
70
 
71
  for i in range(0, len(contours2)):
@@ -78,8 +86,7 @@ def contourfinder(image1, image2, text=None, num_frames=100):
78
  vid2.write(blank2)
79
 
80
  vid2.release()
81
- full_dir_vid_2 = Path("vid2.mp4").resolve().as_posix()
82
- clip3 = vfc(full_dir_vid_2)
83
 
84
  # Next is the text vid
85
 
@@ -105,7 +112,7 @@ def contourfinder(image1, image2, text=None, num_frames=100):
105
  def text_frames(text, image, org):
106
  spacing = 55 # spacing between letters
107
  blink = image
108
- cv2.imwrite(f"blink.png", blink)
109
  for i in range(0, len(text) - 1):
110
 
111
  text_vid.write(blink)
@@ -123,7 +130,7 @@ def contourfinder(image1, image2, text=None, num_frames=100):
123
  print(org)
124
 
125
  # Displaying the image
126
- cv2.imwrite(f"text_im{i}.png", image)
127
 
128
  # Complile to video
129
  text_vid.write(image)
@@ -159,10 +166,7 @@ def resize(width, img):
159
  return img
160
 
161
 
162
-
163
-
164
-
165
- def resize_img(img1, img2):
166
  img_target_size = Image.open(img1)
167
  img_to_resize = resize_and_crop(
168
  img2,
@@ -172,10 +176,12 @@ def resize_img(img1, img2):
172
  ), # set width and height to match cv2_images[0]
173
  crop_origin="middle",
174
  )
175
- img_to_resize.save("resized_img2.png")
176
 
177
 
178
- def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
 
 
179
  frame1 = images[0]
180
  frame2 = images[1]
181
 
@@ -185,11 +191,14 @@ def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
185
  frame1 = resize(256, frame1)
186
  frame2 = resize(256, frame2)
187
 
188
- frame1.save("test1.png")
189
- frame2.save("test2.png")
190
 
191
- resize_img("test1.png", "test2.png")
192
- input_frames = ["test1.png", "resized_img2.png"]
 
 
 
193
 
194
  frames = list(
195
  util.interpolate_recursively_from_files(
@@ -200,10 +209,15 @@ def get_video_frames(images, times_to_interpolate=6, model_name_index=0):
200
 
201
 
202
  def create_mp4_with_audio(frames, cv2_images, duration, audio, output_path):
203
- temp_vid_path = "TEMP.mp4"
 
204
  mediapy.write_video(temp_vid_path, frames, fps=5)
205
- print(f"TYPES....{type(cv2_images[0])},{type(cv2_images[1])} SHAPES{cv2_images[0].shape} Img {cv2_images[0]}")
206
- clip1, clip3 = contourfinder(cv2_images[0], cv2_images[1]) # has a third text option
 
 
 
 
207
 
208
  # Use open CV and moviepy code
209
  # So we move from open CV video 1 to out.mp4 to open CV video2
@@ -212,7 +226,7 @@ def create_mp4_with_audio(frames, cv2_images, duration, audio, output_path):
212
  clip3 = clip3.set_start((clip1.duration - 0.5) + (clip2.duration)).crossfadein(2)
213
 
214
  new_clip = CompositeVideoClip([clip1, clip2, clip3])
215
- new_clip.audio = audio
216
  new_clip.set_duration(duration)
217
  new_clip.write_videofile(output_path, audio_codec="aac")
218
  return output_path
 
1
  import os
2
  import sys
3
+ from pathlib import Path
4
 
5
  import cv2
6
  import mediapy
 
11
  from moviepy.editor import CompositeVideoClip
12
  from moviepy.editor import VideoFileClip as vfc
13
  from PIL import Image
 
14
 
15
 
16
  # get key positions at which frame needs to be generated
 
21
  return positions
22
 
23
 
24
+ def contourfinder(image1, image2, text=None, num_frames=100, output_dir="temp"):
25
  # Create two blank pages to write into
26
  # I just hardcoded 1024*1024 as the size, ideally this should be np.shape(image1)
27
+ blank = np.zeros(np.shape(image1), dtype="uint8")
28
+ blank2 = np.zeros(np.shape(image2), dtype="uint8")
29
  # Threshold and contours for image 1 and 2
30
  threshold = cv2.Canny(image=image1, threshold1=100, threshold2=200)
31
  contours, hierarchies = cv2.findContours(
 
39
 
40
  # Initialize three empty videos
41
  vid1 = cv2.VideoWriter(
42
+ Path(output_dir / "vid1.mp4"),
43
+ cv2.VideoWriter_fourcc(*"mp4v"),
44
+ 24,
45
+ threshold.shape,
46
  )
47
  vid2 = cv2.VideoWriter(
48
+ Path(output_dir / "vid2.mp4"),
49
+ cv2.VideoWriter_fourcc(*"mp4v"),
50
+ 24,
51
+ threshold.shape,
52
  )
53
  text_vid = cv2.VideoWriter(
54
+ Path(output_dir / "text_video.mp4"),
55
+ cv2.VideoWriter_fourcc(*"mp4v"),
56
+ 10,
57
+ threshold.shape,
58
  )
59
 
60
  # Get positions
 
71
  frames.append(blank)
72
  # Compile to video
73
  vid1.write(blank)
74
+
75
  vid1.release()
76
+ clip1 = vfc(Path(output_dir / "vid1.mp4"))
 
77
  positions = list_of_positions((len(contours2)))
78
 
79
  for i in range(0, len(contours2)):
 
86
  vid2.write(blank2)
87
 
88
  vid2.release()
89
+ clip3 = vfc(Path(output_dir / "vid2.mp4"))
 
90
 
91
  # Next is the text vid
92
 
 
112
  def text_frames(text, image, org):
113
  spacing = 55 # spacing between letters
114
  blink = image
115
+ cv2.imwrite(Path(output_dir / "blink.png"), blink)
116
  for i in range(0, len(text) - 1):
117
 
118
  text_vid.write(blink)
 
130
  print(org)
131
 
132
  # Displaying the image
133
+ cv2.imwrite(Path(output_dir / f"text_im{i}.png"), image)
134
 
135
  # Compile to video
136
  text_vid.write(image)
 
166
  return img
167
 
168
 
169
+ def resize_img(img1, img2, output_dir):
 
 
 
170
  img_target_size = Image.open(img1)
171
  img_to_resize = resize_and_crop(
172
  img2,
 
176
  ), # set width and height to match cv2_images[0]
177
  crop_origin="middle",
178
  )
179
+ img_to_resize.save(Path(output_dir / "resized_img2.png"))
180
 
181
 
182
+ def get_video_frames(
183
+ images, vid_output_dir="temp", times_to_interpolate=6, model_name_index=0
184
+ ):
185
  frame1 = images[0]
186
  frame2 = images[1]
187
 
 
191
  frame1 = resize(256, frame1)
192
  frame2 = resize(256, frame2)
193
 
194
+ frame1.save(Path(vid_output_dir / "test1.png"))
195
+ frame2.save(Path(vid_output_dir / "test2.png"))
196
 
197
+ resize_img("test1.png", "test2.png", vid_output_dir)
198
+ input_frames = [
199
+ Path(vid_output_dir / "test1.png"),
200
+ Path(vid_output_dir / "resized_img2.png"),
201
+ ]
202
 
203
  frames = list(
204
  util.interpolate_recursively_from_files(
 
209
 
210
 
211
  def create_mp4_with_audio(frames, cv2_images, duration, audio, output_path):
212
+ vid_output_dir = output_path.parent
213
+ temp_vid_path = Path(vid_output_dir / "TEMP.mp4")
214
  mediapy.write_video(temp_vid_path, frames, fps=5)
215
+ print(
216
+ f"TYPES....{type(cv2_images[0])},{type(cv2_images[1])} SHAPES{cv2_images[0].shape} Img {cv2_images[0]}"
217
+ )
218
+ clip1, clip3 = contourfinder(
219
+ cv2_images[0], cv2_images[1]
220
+ ) # has a third text option
221
 
222
  # Use open CV and moviepy code
223
  # So we move from open CV video 1 to out.mp4 to open CV video2
 
226
  clip3 = clip3.set_start((clip1.duration - 0.5) + (clip2.duration)).crossfadein(2)
227
 
228
  new_clip = CompositeVideoClip([clip1, clip2, clip3])
229
+ new_clip.audio = audio  # Naively append audio without considering the length of the video, could be a problem, no idea, but it works, so I'm not touching it
230
  new_clip.set_duration(duration)
231
  new_clip.write_videofile(output_path, audio_codec="aac")
232
  return output_path
app.py CHANGED
@@ -1,9 +1,10 @@
1
  import os
2
- import sys
3
  import shutil
4
  import subprocess
 
5
  import uuid
6
  from pathlib import Path
 
7
  import gradio as gr
8
  from moviepy.editor import AudioFileClip
9
 
@@ -11,7 +12,6 @@ output_dir = Path("temp/").absolute()
11
  output_dir.mkdir(exist_ok=True, parents=True)
12
 
13
 
14
-
15
  class SpotifyApi:
16
  spotify_directory = Path("spotify")
17
  final_directory = output_dir
@@ -77,7 +77,10 @@ def process_inputs(
77
 
78
 
79
  def animate_images(image_paths: list[str], audio_input: AudioInput) -> str:
80
- from animate import create_mp4_with_audio, get_video_frames # Only import after git clone and when necessary takes loooong
 
 
 
81
 
82
  # Generate a random folder name and change directories to there
83
  foldername = str(uuid.uuid4())[:8]
@@ -87,7 +90,7 @@ def animate_images(image_paths: list[str], audio_input: AudioInput) -> str:
87
  audio_clip = audio_clip.subclip(
88
  audio_input.start_time, audio_input.start_time + audio_input.run_for
89
  )
90
- video_frames, cv2_images = get_video_frames(image_paths)
91
  path = Path(vid_output_dir / "output_final.mp4").as_posix()
92
  return create_mp4_with_audio(
93
  video_frames, cv2_images, audio_clip.duration, audio_clip, path
@@ -112,12 +115,19 @@ iface = gr.Interface(
112
  outputs="video",
113
  )
114
 
115
- if __name__ == '__main__':
116
- subprocess.call(["git", "clone", "https://github.com/google-research/frame-interpolation", "frame_interpolation"]) # install frame_interplation I guess
 
 
 
 
 
 
 
117
  sys.path.append("frame_interpolation")
118
 
119
  # My installs
120
  os.chdir(
121
  output_dir
122
  ) # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯
123
- iface.launch(share=True)
 
1
  import os
 
2
  import shutil
3
  import subprocess
4
+ import sys
5
  import uuid
6
  from pathlib import Path
7
+
8
  import gradio as gr
9
  from moviepy.editor import AudioFileClip
10
 
 
12
  output_dir.mkdir(exist_ok=True, parents=True)
13
 
14
 
 
15
  class SpotifyApi:
16
  spotify_directory = Path("spotify")
17
  final_directory = output_dir
 
77
 
78
 
79
  def animate_images(image_paths: list[str], audio_input: AudioInput) -> str:
80
+ from animate import ( # Only import after git clone and when necessary takes loooong
81
+ create_mp4_with_audio,
82
+ get_video_frames,
83
+ )
84
 
85
  # Generate a random folder name and change directories to there
86
  foldername = str(uuid.uuid4())[:8]
 
90
  audio_clip = audio_clip.subclip(
91
  audio_input.start_time, audio_input.start_time + audio_input.run_for
92
  )
93
+ video_frames, cv2_images = get_video_frames(image_paths, vid_output_dir)
94
  path = Path(vid_output_dir / "output_final.mp4").as_posix()
95
  return create_mp4_with_audio(
96
  video_frames, cv2_images, audio_clip.duration, audio_clip, path
 
115
  outputs="video",
116
  )
117
 
118
+ if __name__ == "__main__":
119
+ subprocess.call(
120
+ [
121
+ "git",
122
+ "clone",
123
+ "https://github.com/google-research/frame-interpolation",
124
+ "frame_interpolation",
125
+ ]
126
+ ) # install frame_interpolation I guess
127
  sys.path.append("frame_interpolation")
128
 
129
  # My installs
130
  os.chdir(
131
  output_dir
132
  ) # change working directory to output_dir because the hf spaces model has no option to specify output directory ¯\_(ツ)_/¯
133
+ iface.launch(share=True)