from fastapi import APIRouter, HTTPException, status, BackgroundTasks, UploadFile, Query
from .Schema import GeneratorRequest
from .utils.GroqInstruct import chatbot
from .Story.Story import Story
import asyncio, pprint, json
from tqdm import tqdm
from .database.Model import models, database_url, Scene, Project
from .utils.RenderVideo import RenderVideo
from .Prompts.StoryGen import Prompt
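
# Generate the data for a single scene via Scene.generate_scene_data()
# and write the refreshed fields back to the database.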
async def update_scene(model_scene):
    await model_scene.generate_scene_data()
    await model_scene.update(**model_scene.__dict__)
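
# End-to-end generation pipeline: ask the Groq chatbot for a story,
# persist each scene to the database, fill in scene data in batches of two,
# then render the assembled video.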
async def main(request: GeneratorRequest):
    topic = request.prompt
    renderer = RenderVideo()
    await models._create_all(database_url)

    message = chatbot(Prompt.format(topic=topic))
    generated_story = Story.from_dict(message["scenes"])
    print("Generated Story ✅")

    project = await Project.objects.create(name=topic[0:100])

    # generated_story.scenes is a list of story scenes; process them in batches of two
    scene_updates = []
    with tqdm(total=len(generated_story.scenes)) as pbar:
        for i in range(0, len(generated_story.scenes), 2):
            batch = generated_story.scenes[i : i + 2]  # Take a batch of two story scenes
            batch_updates = []
            for story_scene in batch:
                model_scene = await Scene.objects.create(project=project)
                model_scene.image_prompts = story_scene.image_prompts
                model_scene.narration = story_scene.narration
                await model_scene.update(**model_scene.__dict__)
                # Collect the update coroutine for this scene
                batch_updates.append(update_scene(model_scene))
            scene_updates.extend(batch_updates)  # Keep a record of all scheduled updates
            await asyncio.gather(*batch_updates)  # Run this batch's updates concurrently
            pbar.update(len(batch))  # Advance the progress bar by the batch size

    project_json = await project.generate_json()
    await renderer.render_video(project_json)
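
# Router for the video-generation endpoint; the pipeline runs as a
# FastAPI background task so the request returns immediately.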
generator_router = APIRouter(tags=["video-Generator"])


@generator_router.post("/generate_video")
async def generate_video(
    videoRequest: GeneratorRequest, background_task: BackgroundTasks
):
    background_task.add_task(main, videoRequest)
    return {"task_id": "started"}