Mbonea commited on
Commit
4e4bad2
1 Parent(s): 6a4b58c
App/Generate/database/Model.py CHANGED
@@ -6,6 +6,7 @@ from pydub import AudioSegment
6
  from .DescriptAPI import Speak
7
  from .ElevenLab import ElevenLab
8
  from .Vercel import AsyncImageGenerator
 
9
  import aiohttp
10
  from typing import List
11
  from pydantic import BaseModel
@@ -56,7 +57,7 @@ class Project(orm.Model):
56
  }
57
 
58
  async def get_all_scenes(self):
59
- return await Scene.objects.filter(project=self).order_by("id").all()
60
 
61
  async def generate_json(self):
62
  project_scenes: List[Scene] = await self.get_all_scenes()
@@ -113,34 +114,67 @@ class Project(orm.Model):
113
  )
114
  text_stream.extend(temp[:-1])
115
 
116
- ## images and transitions
117
- for image in scene.images:
118
- file_name = str(uuid.uuid4()) + ".png"
119
- self.links.append({"file_name": file_name, "link": image})
120
- image_assets.append(
121
- {
122
- "type": "image",
123
- "name": file_name,
124
- "start": self.start,
125
- "end": self.start + scene.image_duration,
126
- }
127
- )
128
- self.start = self.start + scene.image_duration
129
-
130
- # transitions between images
131
- video_assets.append(
132
- {
133
- "type": "video",
134
- "name": "Effects/" + random.choice(transitions),
135
- "start": self.start - 1,
136
- "end": self.start + 2,
137
- "props": {
138
- "startFrom": 1 * 30,
139
- "endAt": 3 * 30,
140
- "volume": 0,
141
- },
142
- }
143
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
144
 
145
  self.assets.append({"type": "audio", "sequence": audio_assets})
146
  ## add the images to assets
@@ -197,7 +231,6 @@ class Scene(orm.Model):
197
  self.narration_link = link
198
 
199
  async def retry_narration_generation(self):
200
- print(self.narration)
201
  retry_count = 0
202
  while retry_count < 3:
203
  try:
 
6
  from .DescriptAPI import Speak
7
  from .ElevenLab import ElevenLab
8
  from .Vercel import AsyncImageGenerator
9
+ from .Video3d import VideoGenerator
10
  import aiohttp
11
  from typing import List
12
  from pydantic import BaseModel
 
57
  }
58
 
59
  async def get_all_scenes(self):
60
+ return await Scene.objects.filter(project=self).all()
61
 
62
  async def generate_json(self):
63
  project_scenes: List[Scene] = await self.get_all_scenes()
 
114
  )
115
  text_stream.extend(temp[:-1])
116
 
117
+ sample_image_extension = scene.images[0].split(".")[-1]
118
+
119
+ if sample_image_extension == "mp4":
120
+ ## moving images
121
+ for image in scene.images:
122
+ file_name = str(uuid.uuid4()) + ".mp4"
123
+ self.links.append({"file_name": file_name, "link": image})
124
+ video_assets.append(
125
+ {
126
+ "type": "video",
127
+ "name": file_name,
128
+ "start": self.start,
129
+ "end": self.start + scene.image_duration,
130
+ "props": {
131
+ "volume": 0,
132
+ "loop": "true",
133
+ "style": {
134
135
+ "transform": "translate(-50%, -50%)",
136
+ "position": "absolute",
137
+ "top": "50%",
138
+ "left": "50%",
139
+ "width": 1080,
140
+ "height": 1920,
141
+ "objectFit": "cover",
142
143
+ },
144
+ },
145
+ }
146
+ )
147
+ self.start = self.start + scene.image_duration
148
+
149
+ else:
150
+ ## images and transitions
151
+ for image in scene.images:
152
+ file_name = str(uuid.uuid4()) + ".png"
153
+ self.links.append({"file_name": file_name, "link": image})
154
+ image_assets.append(
155
+ {
156
+ "type": "image",
157
+ "name": file_name,
158
+ "start": self.start,
159
+ "end": self.start + scene.image_duration,
160
+ }
161
+ )
162
+ self.start = self.start + scene.image_duration
163
+
164
+ ## transitions between images
165
+ # video_assets.append(
166
+ # {
167
+ # "type": "video",
168
+ # "name": "Effects/" + random.choice(transitions),
169
+ # "start": self.start - 1,
170
+ # "end": self.start + 2,
171
+ # "props": {
172
+ # "startFrom": 1 * 30,
173
+ # "endAt": 3 * 30,
174
+ # "volume": 0,
175
+ # },
176
+ # }
177
+ # )
178
 
179
  self.assets.append({"type": "audio", "sequence": audio_assets})
180
  ## add the images to assets
 
231
  self.narration_link = link
232
 
233
  async def retry_narration_generation(self):
 
234
  retry_count = 0
235
  while retry_count < 3:
236
  try:
App/Generate/database/Video3d.py ADDED
@@ -0,0 +1,120 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import aiohttp
import asyncio
from itertools import chain


class VideoGenerator:
    """Fan image->video (depth-flow) jobs out across a pool of HF Space
    endpoints and poll each job until its rendered video is downloadable.

    NOTE(review): every endpoint in the pool is assumed to expose the same
    ``/generate_video`` and ``/download/{file}`` API — confirm against the
    deployed Spaces.
    """

    def __init__(self):
        # Round-robin pool of identically-deployed depthflow Spaces.
        self.base_urls = [f"https://yakova-depthflow-{i}.hf.space" for i in range(10)]
        self.headers = {"accept": "application/json"}
        # Default generation parameters; "raw" is sent as the literal string
        # the API expects, not a Python bool.
        self.default_params = {
            "frame_rate": 30,
            "duration": 3,
            "quality": 1,
            "ssaa": 0.8,
            "raw": "false",
        }

    async def generate_video(self, base_url, params):
        """Submit one generation job.

        Returns the server-side output file id, or None when the request
        does not come back with HTTP 200.
        """
        url = f"{base_url}/generate_video"

        async with aiohttp.ClientSession() as session:
            async with session.post(
                url, params=params, headers=self.headers
            ) as response:
                if response.status == 200:
                    data = await response.json()
                    return data.get("output_file")
                print(f"Request to {url} failed with status: {response.status}")
                return None

    async def check_video_ready(self, base_url, output_file):
        """Poll the download endpoint until the video exists and is non-empty.

        Returns the download URL on success, or None on an unexpected
        response. WARNING: this loops forever while the server keeps
        answering "Video not found" — wrap the call in asyncio.wait_for
        if a deadline is needed.
        """
        url = f"{base_url}/download/{output_file}"

        async with aiohttp.ClientSession() as session:
            while True:
                async with session.get(url, headers=self.headers) as response:
                    if response.status == 200:
                        video_content = await response.read()
                        if len(video_content) > 0:
                            return url
                        # File exists but is still empty: back off for longer.
                        # (Message fixed to match the actual 120 s sleep.)
                        print(
                            f"Video {output_file} is ready but the file size is zero, retrying in 120 seconds..."
                        )
                        await asyncio.sleep(120)
                    elif response.status == 404:
                        data = await response.json()
                        if data.get("detail") == "Video not found":
                            print(
                                f"Video {output_file} not ready yet, retrying in 10 seconds..."
                            )
                            await asyncio.sleep(10)
                        else:
                            print(f"Unexpected response for {output_file}: {data}")
                            return None
                    else:
                        print(f"Request to {url} failed with status: {response.status}")
                        return None

    async def process_image(self, base_url, image_link):
        """Generate a video for one image link and wait for it.

        Returns the download URL, or None when generation or polling fails.
        """
        params = self.default_params.copy()
        params["image_link"] = image_link

        output_file = await self.generate_video(base_url, params)
        if not output_file:
            print(f"Failed to generate the video for {image_link}")
            return None

        print(f"Generated video file id: {output_file} for image {image_link}")
        video_url = await self.check_video_ready(base_url, output_file)
        if not video_url:
            print(f"Failed to get the video URL for {image_link}")
            return None

        print(
            f"Video for {image_link} is ready and can be downloaded from: {video_url}"
        )
        return video_url

    def flatten(self, nested_list):
        """Collapse one level of nesting: [[a, b], [c]] -> [a, b, c]."""
        return list(chain.from_iterable(nested_list))

    def nest(self, flat_list, nested_dims):
        """Re-nest flat_list into the same shape as nested_dims.

        Raises StopIteration if flat_list holds fewer items than
        nested_dims describes.
        """
        it = iter(flat_list)
        return [[next(it) for _ in inner_list] for inner_list in nested_dims]

    async def run(self, nested_image_links):
        """Process every image link concurrently, round-robining endpoints.

        Returns the video URLs (None per failed item) in the same nested
        shape as the input.
        """
        flat_image_links = self.flatten(nested_image_links)
        # enumerate() replaces the manual base_index counter.
        tasks = [
            self.process_image(self.base_urls[i % len(self.base_urls)], image_link)
            for i, image_link in enumerate(flat_image_links)
        ]

        flat_video_urls = await asyncio.gather(*tasks)
        return self.nest(flat_video_urls, nested_image_links)


# # Example usage
# nested_image_links = [
#     ["https://example.com/out-0.png"],
#     ["https://example.com/out-1.png", "https://example.com/out-2.png"],
# ]
# nested_video_urls = asyncio.run(VideoGenerator().run(nested_image_links))
# print("Generated video URLs:", nested_video_urls)
App/Generate/generatorRoutes.py CHANGED
@@ -6,7 +6,14 @@ from .utils.HuggingChat import Hugging
6
  from .Story.Story import Story
7
  import asyncio, pprint, json
8
  from tqdm import tqdm
9
- from .database.Model import models, database_url, Scene, Project, database
 
 
 
 
 
 
 
10
  from .utils.RenderVideo import RenderVideo
11
  from .Prompts.StoryGen import Prompt
12
  from App.Editor.editorRoutes import celery_task, EditorRequest
@@ -23,18 +30,21 @@ async def from_dict_generate(data: Story):
23
  await generate_assets(generated_story=generated_strory)
24
 
25
 
26
- async def generate_assets(generated_story: Story, batch_size=4):
27
  x = await Project.objects.create(name=str(uuid.uuid4()))
28
 
29
  # Assuming generated_story.scenes is a list of scenes
30
- scene_updates = []
31
  with tqdm(total=len(generated_story.scenes)) as pbar:
 
 
 
32
  for i in range(0, len(generated_story.scenes), batch_size):
33
  batch = generated_story.scenes[
34
  i : i + batch_size
35
  ] # Get a batch of two story scenes
36
  batch_updates = []
37
 
 
38
  for story_scene in batch:
39
  model_scene = await Scene.objects.create(project=x)
40
  model_scene.image_prompts = story_scene.image_prompts
@@ -43,12 +53,27 @@ async def generate_assets(generated_story: Story, batch_size=4):
43
  batch_updates.append(
44
  update_scene(model_scene)
45
  ) # Append update coroutine to batch_updates
46
- scene_updates.extend(batch_updates) # Accumulate updates for later awaiting
47
  await asyncio.gather(
48
  *batch_updates
49
  ) # Await update coroutines for this batch
 
50
  pbar.update(len(batch)) # Increment progress bar by the size of the batch
51
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
52
  temp = await x.generate_json()
53
  # print(temp)
54
 
 
6
  from .Story.Story import Story
7
  import asyncio, pprint, json
8
  from tqdm import tqdm
9
+ from .database.Model import (
10
+ models,
11
+ database_url,
12
+ Scene,
13
+ Project,
14
+ database,
15
+ VideoGenerator,
16
+ )
17
  from .utils.RenderVideo import RenderVideo
18
  from .Prompts.StoryGen import Prompt
19
  from App.Editor.editorRoutes import celery_task, EditorRequest
 
30
  await generate_assets(generated_story=generated_strory)
31
 
32
 
33
+ async def generate_assets(generated_story: Story, batch_size=4, threeD=True):
34
  x = await Project.objects.create(name=str(uuid.uuid4()))
35
 
36
  # Assuming generated_story.scenes is a list of scenes
 
37
  with tqdm(total=len(generated_story.scenes)) as pbar:
38
+
39
+ all_scenes: list[Scene] = []
40
+ # create the batches
41
  for i in range(0, len(generated_story.scenes), batch_size):
42
  batch = generated_story.scenes[
43
  i : i + batch_size
44
  ] # Get a batch of two story scenes
45
  batch_updates = []
46
 
47
+ # generate pictures or narration per batch
48
  for story_scene in batch:
49
  model_scene = await Scene.objects.create(project=x)
50
  model_scene.image_prompts = story_scene.image_prompts
 
53
  batch_updates.append(
54
  update_scene(model_scene)
55
  ) # Append update coroutine to batch_updates
56
+ # pause per batch
57
  await asyncio.gather(
58
  *batch_updates
59
  ) # Await update coroutines for this batch
60
+ all_scenes.append(model_scene)
61
  pbar.update(len(batch)) # Increment progress bar by the size of the batch
62
 
63
+ ###### Here we generate the videos
64
+
65
+ if threeD:
66
+ vid_gen = VideoGenerator()
67
+ nested_images = []
68
+ for scene in all_scenes:
69
+ nested_images.append(scene.images)
70
+
71
+ results = await vid_gen.run(nested_image_links=nested_images)
72
+
73
+ for result, _scene in zip(results, all_scenes):
74
+ _scene.images = result
75
+ await _scene.update(**_scene.__dict__)
76
+
77
  temp = await x.generate_json()
78
  # print(temp)
79
 
App/Generate/utils/Cohere.py CHANGED
@@ -22,7 +22,11 @@ class VideoOutput(BaseModel):
22
 
23
  # Patching the Cohere client with the instructor for enhanced capabilities
24
  client = instructor.from_cohere(
25
- cohere.Client(os.environ.get("COHERE_API", "RANDOM_STRING")),
 
 
 
 
26
  # max_tokens=5000,
27
  model="command-r-plus",
28
  )
@@ -36,7 +40,7 @@ def chatbot(prompt: str, model: str = "command-r-plus"):
36
 
37
  response: VideoOutput = client.chat.completions.create(
38
  model=model,
39
- max_tokens=5000,
40
  response_model=VideoOutput,
41
  messages=[
42
  {
@@ -46,3 +50,6 @@ def chatbot(prompt: str, model: str = "command-r-plus"):
46
  ],
47
  )
48
  return response.dict()
 
 
 
 
22
 
23
  # Patching the Cohere client with the instructor for enhanced capabilities
24
  client = instructor.from_cohere(
25
+ cohere.Client(
26
+ os.environ.get(
27
+ "COHERE_API",
28
+ )
29
+ ),
30
  # max_tokens=5000,
31
  model="command-r-plus",
32
  )
 
40
 
41
  response: VideoOutput = client.chat.completions.create(
42
  model=model,
43
+ # max_tokens=5000,
44
  response_model=VideoOutput,
45
  messages=[
46
  {
 
50
  ],
51
  )
52
  return response.dict()
53
+
54
+
55
+ # print(chatbot("A horror story"))
Remotion-app/package.json CHANGED
@@ -17,8 +17,6 @@
17
  "@remotion/transitions": "4.0.147",
18
  "@remotion/zod-types": "4.0.147",
19
  "@remotion/tailwind": "4.0.147",
20
- "class-variance-authority": "^0.7.0",
21
- "clsx": "^2.1.0",
22
  "react": "^18.0.0",
23
  "react-dom": "^18.0.0",
24
  "remotion": "4.0.147",
 
17
  "@remotion/transitions": "4.0.147",
18
  "@remotion/zod-types": "4.0.147",
19
  "@remotion/tailwind": "4.0.147",
 
 
20
  "react": "^18.0.0",
21
  "react-dom": "^18.0.0",
22
  "remotion": "4.0.147",
Remotion-app/remotion.config.js CHANGED
@@ -12,4 +12,4 @@ Config.overrideWebpackConfig((currentConfiguration) => {
12
 
13
  //Config.setBrowserExecutable("/usr/bin/chrome-headless-shell");
14
  Config.setVideoImageFormat('jpeg');
15
- // Config.setConcurrency(2);
 
12
 
13
  //Config.setBrowserExecutable("/usr/bin/chrome-headless-shell");
14
  Config.setVideoImageFormat('jpeg');
15
+ Config.setConcurrency(1);