Prudvireddy committed • Commit 9b6561b • Parent(s): f636163
Update tools.py
tools.py
CHANGED
@@ -1,421 +1,424 @@
(removed: previous tools.py, 421 lines; most of the old content is truncated in this view)
- 1-16: same import block as the new version (langchain.tools, re, os, langchain_groq, requests, cv2, moviepy.editor, langchain.pydantic_v1, langchain_community tools/utilities, commented diffusers/bitsandbytes/torch imports, pyttsx3)
- 17: import (truncated)
- 18-28: same commented-out LLM configuration block as the new version
- 29-420: truncated; only fragments survive ("# pipe", "# quantized_layer.", "engine.", "import", "from", "images_dir", "speech_dir")
- 421: return f'speechs generated.'#f'speech generated for "{text}" and saved to directory {speech_dir} as speech{num}.mp3'
(added: updated tools.py, 424 lines)

from langchain.tools import tool, Tool
import re
import os
from langchain_groq import ChatGroq
import requests
import cv2
from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips
from langchain.pydantic_v1 import BaseModel, Field
from langchain_community.tools import WikipediaQueryRun
from langchain_community.utilities import WikipediaAPIWrapper

# from diffusers import StableDiffusionXLPipeline, DPMSolverSinglestepScheduler
# import bitsandbytes as bnb
# import torch.nn as nn
# import torch
import pyttsx3
# from agents import get_agents_and_tasks
# from langchain_google_genai import ChatGoogleGenerativeAI

# from langchain.chat_models import ChatOpenAI
# # llm2 = ChatOpenAI(model='gpt-3.5-turbo')
# # llm3 = ChatOpenAI(model='gpt-3.5-turbo')
# llm1 = ChatGroq(model='llama3-70b-8192', temperature=0.6, max_tokens=2048)
# # llm2 = ChatGroq(model='mixtral-8x7b-32768', temperature=0.6, max_tokens=2048, api_key='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')
# llm2 = ChatGoogleGenerativeAI(model='gemini-pro', temperature=0.0)
# # llm2 = ChatGroq(model='llama3-70b-8192', temperature=0.6, max_tokens=2048, api_key='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')
# # llm3 = ChatGoogleGenerativeAI(model='gemini-pro')
# llm4 = ChatGroq(model='llama3-70b-8192', temperature=0.6, max_tokens=2048, api_key='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')
# groq_api_key=os.environ.get('GROQ_API_KEY')
# llm = ChatGroq(model='llama3-70b-8192', temperature=0.6, max_tokens=1024, api_key=groq_api_key)

# pipe = StableDiffusionXLPipeline.from_pretrained("sd-community/sdxl-flash", torch_dtype=torch.float16).to('cuda')
# pipe.scheduler = DPMSolverSinglestepScheduler.from_config(pipe.scheduler.config, timestep_spacing="trailing")

# def quantize_model_to_4bit(model):
#     replacements = []

#     # Collect layers to be replaced
#     for name, module in model.named_modules():
#         if isinstance(module, nn.Linear):
#             replacements.append((name, module))

#     # Replace layers
#     for name, module in replacements:
#         # Split the name to navigate to the parent module
#         *path, last = name.split('.')
#         parent = model
#         for part in path:
#             parent = getattr(parent, part)

#         # Create and assign the quantized layer
#         quantized_layer = bnb.nn.Linear4bit(module.in_features, module.out_features, bias=module.bias is not None)
#         quantized_layer.weight.data = module.weight.data
#         if module.bias is not None:
#             quantized_layer.bias.data = module.bias.data
#         setattr(parent, last, quantized_layer)

#     return model

# pipe.unet = quantize_model_to_4bit(pipe.unet)
# pipe.enable_model_cpu_offload()

def generate_speech(text, speech_dir='./outputs/audio', lang='en', speed=170, voice='default', num=0):
    """
    Generates speech for given script.
    """
    engine = pyttsx3.init()

    # Set language and voice
    voices = engine.getProperty('voices')
    if voice == 'default':
        voice_id = voices[1].id
    else:
        # Try to find the voice with the given name
        voice_id = None
        for v in voices:
            if voice in v.name:
                voice_id = v.id
                break
        if not voice_id:
            raise ValueError(f"Voice '{voice}' not found.")

    engine.setProperty('voice', voice_id)
    engine.setProperty('rate', speed)
    os.remove(os.path.join(os.path.dirname(os.path.abspath(__file__)), speech_dir, f'speech_{num}.mp3')) if os.path.exists(os.path.join(speech_dir, f'speech_{num}.mp3')) else None
    engine.save_to_file(text, os.path.join(os.path.dirname(os.path.abspath(__file__)), speech_dir, f'speech_{num}.mp3'))
    engine.runAndWait()

# class VideoGeneration(BaseModel):
#     images_dir : str = Field(description='Path to images directory, such as "outputs/images"')
#     speeches_dir : str = Field(description='Path to speeches directory, such as "outputs/speeches"')

# @tool(args_schema=VideoGeneration)
# def create_video_from_images_and_audio(images_dir, speeches_dir, zoom_factor=1.2):
#     """Creates video using images and audios with zoom-in effect"""
#     images_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), images_dir)
#     speeches_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), speeches_dir)

#     images_paths = os.listdir(images_dir)
#     audio_paths = os.listdir(speeches_dir)
#     # print(images_paths, audio_paths)
#     clips = []

#     for i in range(min(len(images_paths), len(audio_paths))):
#         # Load the image
#         img_clip = ImageClip(os.path.join(images_dir, images_paths[i]))

#         # Load the audio file
#         audioclip = AudioFileClip(os.path.join(speeches_dir, audio_paths[i]))

#         # Set the duration of the video clip to the duration of the audio file
#         videoclip = img_clip.set_duration(audioclip.duration)

#         # Apply zoom-in effect to the video clip
#         zoomed_clip = apply_zoom_in_effect(videoclip, zoom_factor)

#         # Add audio to the zoomed video clip
#         zoomed_clip = zoomed_clip.set_audio(audioclip)

#         clips.append(zoomed_clip)

#     # Concatenate all video clips
#     final_clip = concatenate_videoclips(clips)

#     # Write the result to a file
#     final_clip.write_videofile(os.path.join(os.path.dirname(os.path.abspath(__file__)), "outputs/final_video/final_video.mp4"), codec='libx264', fps=24)

#     return os.path.join(os.path.dirname(os.path.abspath(__file__)), "outputs/final_video/final_video.mp4")

# def apply_zoom_in_effect(clip, zoom_factor=1.2):
#     width, height = clip.size
#     duration = clip.duration

#     def zoom_in_effect(get_frame, t):
#         frame = get_frame(t)
#         zoom = 1 + (zoom_factor - 1) * (t / duration)
#         new_width, new_height = int(width * zoom), int(height * zoom)
#         resized_frame = cv2.resize(frame, (new_width, new_height))

#         # Calculate the position to crop the frame to the original size
#         x_start = (new_width - width) // 2
#         y_start = (new_height - height) // 2
#         cropped_frame = resized_frame[y_start:y_start + height, x_start:x_start + width]

#         return cropped_frame

#     return clip.fl(zoom_in_effect, apply_to=['mask'])

# Example usage
# image_paths = "outputs/images"
# audio_paths = "outputs/audio"

# video_path = create_video_from_images_and_audio(image_paths, audio_paths)
# print(f"Video created at: {video_path}")


# class ImageGeneration(BaseModel):
#     text : str = Field(description='description of sentence used for image generation')
#     num : int = Field(description='sequence of description passed this tool. Used in image saving path. Example 1,2,3,4,5 and so on')

# class SpeechGeneration(BaseModel):
#     text : str = Field(description='description of sentence used for image generation')
#     num : int = Field(description='sequence of description passed this tool. Used in image saving path. Example 1,2,3,4,5 and so on')

import os
import cv2
from moviepy.editor import ImageClip, AudioFileClip, concatenate_videoclips, VideoFileClip
from PIL import Image, ImageDraw, ImageFont
import numpy as np
from groq import Groq


class VideoGeneration(BaseModel):
    images_dir: str = Field(description='Path to images directory, such as "outputs/images"')
    speeches_dir: str = Field(description='Path to speeches directory, such as "outputs/speeches"')

def split_text_into_chunks(text, chunk_size):
    words = text.split()
    return [' '.join(words[i:i + chunk_size]) for i in range(0, len(words), chunk_size)]

def add_text_to_video(input_video, output_video, text, duration=1, fontsize=40, fontcolor=(255, 255, 255),
                      outline_thickness=2, outline_color=(0, 0, 0), delay_between_chunks=0.1,
                      font_path=os.path.join(os.path.dirname(os.path.abspath(__file__)),'Montserrat-Bold.ttf')):

    chunks = split_text_into_chunks(text, 3)  # Adjust chunk size as needed

    cap = cv2.VideoCapture(input_video)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    fps = int(cap.get(cv2.CAP_PROP_FPS))
    width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    out = cv2.VideoWriter(output_video, fourcc, fps, (width, height))

    frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    chunk_duration_frames = duration * fps
    delay_frames = int(delay_between_chunks * fps)

    font = ImageFont.truetype(font_path, fontsize)

    current_frame = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        frame_pil = Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
        draw = ImageDraw.Draw(frame_pil)

        chunk_index = current_frame // (chunk_duration_frames + delay_frames)

        if current_frame % (chunk_duration_frames + delay_frames) < chunk_duration_frames and chunk_index < len(chunks):
            chunk = chunks[chunk_index]
            text_width, text_height = draw.textsize(chunk, font=font)
            text_x = (width - text_width) // 2
            text_y = height - 400  # Position text at the bottom

            if text_width > width:
                words = chunk.split()
                half = len(words) // 2
                line1 = ' '.join(words[:half])
                line2 = ' '.join(words[half:])

                text_size_line1 = draw.textsize(line1, font=font)
                text_size_line2 = draw.textsize(line2, font=font)
                text_x_line1 = (width - text_size_line1[0]) // 2
                text_x_line2 = (width - text_size_line2[0]) // 2
                text_y = height - 250 - text_size_line1[1]  # Adjust vertical position for two lines

                for dx in range(-outline_thickness, outline_thickness + 1):
                    for dy in range(-outline_thickness, outline_thickness + 1):
                        if dx != 0 or dy != 0:
                            draw.text((text_x_line1 + dx, text_y + dy), line1, font=font, fill=outline_color)
                            draw.text((text_x_line2 + dx, text_y + text_size_line1[1] + dy), line2, font=font, fill=outline_color)

                draw.text((text_x_line1, text_y), line1, font=font, fill=fontcolor)
                draw.text((text_x_line2, text_y + text_size_line1[1]), line2, font=font, fill=fontcolor)

            else:
                for dx in range(-outline_thickness, outline_thickness + 1):
                    for dy in range(-outline_thickness, outline_thickness + 1):
                        if dx != 0 or dy != 0:
                            draw.text((text_x + dx, text_y + dy), chunk, font=font, fill=outline_color)

                draw.text((text_x, text_y), chunk, font=font, fill=fontcolor)

        frame = cv2.cvtColor(np.array(frame_pil), cv2.COLOR_RGB2BGR)

        out.write(frame)
        current_frame += 1

    cap.release()
    out.release()
    cv2.destroyAllWindows()

def apply_zoom_in_effect(clip, zoom_factor=1.2):
    width, height = clip.size
    duration = clip.duration

    def zoom_in_effect(get_frame, t):
        frame = get_frame(t)
        zoom = 1 + (zoom_factor - 1) * (t / duration)
        new_width, new_height = int(width * zoom), int(height * zoom)
        resized_frame = cv2.resize(frame, (new_width, new_height))

        x_start = (new_width - width) // 2
        y_start = (new_height - height) // 2
        cropped_frame = resized_frame[y_start:y_start + height, x_start:x_start + width]

        return cropped_frame

    return clip.fl(zoom_in_effect, apply_to=['mask'])

@tool(args_schema=VideoGeneration)
def create_video_from_images_and_audio(images_dir, speeches_dir, zoom_factor=1.2):
    """Creates video using images and audios.
    Args:
        images_dir: path to images folder, example 'outputs/images'
        speeches_dir: path to speeches folder, example 'outputs/speeches'"""
    client = Groq()
    images_paths = sorted(os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),images_dir)))
    audio_paths = sorted(os.listdir(os.path.join(os.path.dirname(os.path.abspath(__file__)),speeches_dir)))
    clips = []
    temp_files = []

    for i in range(min(len(images_paths), len(audio_paths))):
        img_clip = ImageClip(os.path.join(os.path.dirname(os.path.abspath(__file__)),images_dir, images_paths[i]))
        audioclip = AudioFileClip(os.path.join(os.path.dirname(os.path.abspath(__file__)),speeches_dir, audio_paths[i]))
        videoclip = img_clip.set_duration(audioclip.duration)
        zoomed_clip = apply_zoom_in_effect(videoclip, zoom_factor)

        with open(os.path.join(os.path.dirname(os.path.abspath(__file__)),speeches_dir, audio_paths[i]), "rb") as file:
            transcription = client.audio.transcriptions.create(
                file=(audio_paths[i], file.read()),
                model="whisper-large-v3",
                response_format="verbose_json",
            )
            caption = transcription.text

        temp_video_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"outputs/final_video/temp_zoomed_{i}.mp4")
        zoomed_clip.write_videofile(temp_video_path, codec='libx264', fps=24)
        temp_files.append(temp_video_path)

        final_video_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), f"outputs/final_video/temp_captioned_{i}.mp4")
        add_text_to_video(temp_video_path, final_video_path, caption, duration=1, fontsize=60)
        temp_files.append(final_video_path)

        final_clip = VideoFileClip(final_video_path)
        final_clip = final_clip.set_audio(audioclip)

        clips.append(final_clip)

    final_clip = concatenate_videoclips(clips)
    final_clip.write_videofile(os.path.join(os.path.dirname(os.path.abspath(__file__)), "outputs/final_video/final_video.mp4"), codec='libx264', fps=24)

    # Close all video files properly
    for clip in clips:
        clip.close()

    # Remove all temporary files
    for temp_file in temp_files:
        try:
            os.remove(temp_file)
        except Exception as e:
            print(f"Error removing file {temp_file}: {e}")

    return os.path.join(os.path.dirname(os.path.abspath(__file__)), "outputs/final_video/final_video.mp4")

# Example usage
# image_paths = "outputs/images"
# audio_paths = "outputs/speeches"

# video_path = create_video_from_images_and_audio(image_paths, audio_paths)
# print(f"Video created at: {video_path}")

class WikiInputs(BaseModel):
    """Inputs to the wikipedia tool."""
    query: str = Field(description="query to look up in Wikipedia, should be 3 or less words")

api_wrapper = WikipediaAPIWrapper(top_k_results=3)#, doc_content_chars_max=100)

wiki_tool = WikipediaQueryRun(
    name="wiki-tool",
    description="{query:'input here'}",
    args_schema=WikiInputs,
    api_wrapper=api_wrapper,
    return_direct=True,
)

wiki = Tool(
    name = 'wikipedia',
    func = wiki_tool.run,
    description= "{query:'input here'}"
)

# wiki_tool.run("latest news in India")

# @tool
def process_script(script):
    """Used to process the script into dictionary format"""
    dict = {}
    dict['text_for_image_generation'] = re.findall(r'<image>(.*?)</?image>', script)
    dict['text_for_speech_generation'] = re.findall(r'<narration>.*?</?narration>', script)
    return dict

@tool#(args_schema=ImageGeneration)
def image_generator(script):
    """Generates images for the given script.
    Saves it to images_dir and return path
    Args:
    script: a complete script containing narrations and image descriptions"""
    images_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), './outputs/images')
    # if num==1:
    for filename in os.listdir(images_dir):
        file_path = os.path.join(images_dir, filename)
        if os.path.isfile(file_path):
            os.remove(file_path)

    dict = process_script(script)
    for i, text in enumerate(dict['text_for_image_generation']):
        # image = pipe(text, num_inference_steps=12, guidance_scale=2, width=720, height=1280, verbose=0).images[0]
        # image.save(os.path.join(images_dir, f'image{i}.jpg'))
        response = requests.post(
            f"https://api.stability.ai/v2beta/stable-image/generate/core",
            headers={
                "authorization": os.environ.get('STABILITY_AI_API_KEY'),
                "accept": "image/*"
            },
            files={"none": ''},
            data={
                "prompt": text,
                "output_format": "png",
                'aspect_ratio': "9:16",
            },
        )

        if response.status_code == 200:
            with open(os.path.join(images_dir, f'image_{i}.png'), 'wb') as file:
                file.write(response.content)
        else:
            raise Exception(str(response.json()))
    return f'images generated.'#f'image generated for "{text}" and saved to directory {images_dir} as image{num}.jpg'

@tool
def speech_generator(script):
    """Generates speech for given text
    Saves it to speech_dir and return path
    Args:
    script: a complete script containing narrations and image descriptions"""
    speech_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), './outputs/speeches')

    # if num==1:
    for filename in os.listdir(speech_dir):
        file_path = os.path.join(speech_dir, filename)
        if os.path.isfile(file_path):
            os.remove(file_path)

    dict = process_script(script)
    print(dict)
    for i, text in enumerate(dict['text_for_speech_generation']):
        generate_speech(text, speech_dir, num=i)
    return f'speechs generated.'#f'speech generated for "{text}" and saved to directory {speech_dir} as speech{num}.mp3'
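
A minimal usage sketch (not part of the commit). It only illustrates how the tools defined above fit together; the sample script text, the module name "tools", and the assumption that the outputs/images, outputs/speeches, and outputs/final_video folders already exist next to tools.py are inferred from the regexes in process_script and the paths used by the tools, not stated anywhere in the file.

# sketch: illustrative only; assumes STABILITY_AI_API_KEY and GROQ_API_KEY are set
# in the environment and that tools.py is importable as the module "tools".
from tools import image_generator, speech_generator, create_video_from_images_and_audio

# Script format inferred from process_script: paired <image>/<narration> tags.
script = (
    "<image>A sunrise over a mountain lake, golden light</image>"
    "<narration>Every day starts with a quiet moment.</narration>"
    "<image>A climber reaching the summit at noon</image>"
    "<narration>Small steps add up to big climbs.</narration>"
)

# The @tool-decorated functions are LangChain tools, so they are invoked via .run()
# (the file itself uses wiki_tool.run(...) in a comment).
print(image_generator.run(script))   # saves image_0.png, image_1.png, ... under outputs/images
print(speech_generator.run(script))  # saves speech_0.mp3, speech_1.mp3, ... under outputs/speeches

# Multi-argument tools take a dict input; returns the final video path.
video_path = create_video_from_images_and_audio.run(
    {"images_dir": "outputs/images", "speeches_dir": "outputs/speeches"}
)
print(video_path)  # .../outputs/final_video/final_video.mp4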