generation update
- App/Generate/Prompts/Example.json +67 -0
- App/Generate/Prompts/StoryGen.py +23 -0
- App/Generate/Prompts/StoryGen.text +21 -0
- App/Generate/Schema.py +7 -0
- App/Generate/Story/Story.py +55 -0
- App/Generate/database/DescriptAPI.py +196 -0
- App/Generate/database/Model.py +301 -0
- App/Generate/database/Test.py +102 -0
- App/Generate/database/Vercel.py +104 -0
- App/Generate/generatorRoutes.py +63 -0
- App/Generate/utils/Bing.py +53 -0
- App/Generate/utils/GroqInstruct.py +77 -0
- App/Generate/utils/HuggingChat.py +36 -0
- App/Generate/utils/RenderVideo.py +24 -0
- App/Generate/utils/VideoEditor.py +90 -0
- App/app.py +2 -0
- Remotion-app/JsonMaker.py +29 -0
- requirements.txt +5 -1
App/Generate/Prompts/Example.json
ADDED
@@ -0,0 +1,67 @@
+[
+    {
+        "narration": "Welcome to 'Weird History Facts', where we uncover the bizarre truths hidden in the past! Our first tale takes us back to ancient Egypt and a pharaoh's unusual obsession... Let the images unfold with this prompt: ",
+        "image_prompts": [
+            "Ancient Egypt, mysterious, hieroglyphics, desert, pyramids, Canon EOS 5D, 24-70mm, midday, harsh shadows, cinematic, Kodak Ektar 100, highly realistic, bright desert sun"
+        ]
+    },
+    {
+        "narration": "Pharaoh Khafra, builder of the second largest pyramid at Giza, had a peculiar fixation with... onions! Yes, this mighty ruler was convinced that onions held the key to eternal life. So much so, that he demanded his workers be paid in onions instead of gold. Imagine, an entire civilization fueled by the power of... well, tears and pungent breath. Now, let's move on to a different type of tear-jerker.",
+        "image_prompts": [
+            "Pharaoh Khafra, regal, onion-shaped crown, golden tears, onion field, Nikon D850, 70-200mm, morning, soft focus, dramatic, Ilford Delta 3200, moderately realistic, warm golden light",
+            "Ancient Egyptian workers, onion currency, onion necklaces, desert market, Canon EOS 5D, 35mm, noon, harsh sunlight, vibrant, Agfa Vista 200, highly realistic, strong natural light"
+        ]
+    },
+    {
+        "narration": "Next, we travel to medieval Europe, where a strange tax was once levied. It's a hair-raising tale, so brace yourselves and imagine the following: ",
+        "image_prompts": [
+            "Medieval Europe, stone castle, moat, colorful flags, cloudy sky, DJI Mavic, 12mm, afternoon, overcast, ethereal, Fuji Pro 400H, highly realistic, soft natural light"
+        ]
+    },
+    {
+        "narration": "In the 11th century, England introduced a 'beard tax' to discourage the fashion of wearing beards. Yes, King Henry I decided that beards were a sign of witchcraft and demanded a tax from any man who wished to keep his facial hair. So, if you thought shaving was a hassle nowadays, be glad you didn't live in medieval England! Now, let's whisk away to a different kind of whiskers.",
+        "image_prompts": [
+            "King Henry I, bearded, throne, angry, beard tax scroll, Sony A7R, 50mm, evening, dramatic lighting, cinematic, Ilford Pan 100, highly realistic, warm candlelight",
+            "Medieval man, shaving, scared of tax collector, humorous, Nikon D850, 85mm, morning, soft natural light, intimate, Kodak Portra 400, moderately realistic, window light"
+        ]
+    },
+    {
+        "narration": "Our next story involves a famous explorer and a rather unusual method of navigation. Prepare to set sail with this prompt: ",
+        "image_prompts": [
+            "Ocean voyage, 15th century, stormy seas, sailing ship, dramatic sky, Hasselblad H6D, 24mm, afternoon, dramatic, Fuji Pro 400H, highly realistic, harsh sunlight"
+        ]
+    },
+    {
+        "narration": "Christopher Columbus, the famous Italian explorer, believed that the tail of the Milky Way galaxy was a surefire way to navigate. He used it as a compass during his voyages, believing it pointed directly to the North Star. Imagine, one of history's greatest explorers relying on the stars for more than just romantic stargazing! Now, let's navigate to a different kind of star power.",
+        "image_prompts": [
+            "Christopher Columbus, night sky, sailing ship, Milky Way, compass, Canon EOS 5D, 14mm, nighttime, starry, Kodak Ektar 100, highly realistic, moonlight",
+            "Milky Way galaxy, tail, stars, abstract, cosmic, DJI Mavic, 10mm, night, vibrant, Agfa Vista 200, moderately realistic, nebula light"
+        ]
+    },
+    {
+        "narration": "In the world of entertainment, we've had our fair share of oddities. Let's shine a spotlight on one such curious case with this prompt: ",
+        "image_prompts": [
+            "Vaudeville stage, early 20th century, colorful lights, crowded theater, Canon EOS 5D, 24-70mm, evening, vibrant, cinematic, Kodak Portra 400, highly realistic, stage lighting"
+        ]
+    },
+    {
+        "narration": "The famous escape artist Harry Houdini had a secret weapon for his acts—his mother! Before each performance, she would secretly lock him in a set of handcuffs, ensuring he could escape from any pair. A heartwarming and slightly bizarre tale of motherly love. Now, let's unlock the next story, which is equally as intriguing.",
+        "image_prompts": [
+            "Harry Houdini, handcuffed, stage, concerned, mother in audience, Nikon D850, 85mm, night, dramatic, Ilford Delta 3200, highly realistic, stage lighting",
+            "Vintage handcuffs, key, wooden table, soft focus, intimate, Sony A7R, 90mm, afternoon, warm lighting, nostalgic, Fuji Pro 400H, moderately realistic, window light"
+        ]
+    },
+    {
+        "narration": "Our final tale involves a famous artist and their rather peculiar palette. Get ready to paint a picture with this prompt: ",
+        "image_prompts": [
+            "Artist's studio, 19th century, canvas, paint tubes, natural light, Hasselblad H6D, 50mm, morning, soft, nostalgic, Kodak Ektar 100, highly realistic, warm morning light"
+        ]
+    },
+    {
+        "narration": "Vincent van Gogh, the post-impressionist master, is known for his vibrant use of color. But did you know he also ate his paints? Yes, van Gogh suffered from pica, a condition causing him to eat non-food items. He believed consuming his paints would bring him closer to his art. A bizarre and slightly dangerous way to embrace your passion! And with that, we conclude our journey through weird history facts.",
+        "image_prompts": [
+            "Vincent van Gogh, paint palette, eating paint, concerned friend, Nikon D850, 35mm, afternoon, soft natural light, intimate, Kodak Portra 400, highly realistic, window light",
+            "Vibrant paint tubes, still life, colorful, abstract, Sony A7R, 90mm, morning, ethereal, Fuji Pro 400H, moderately realistic, soft light"
+        ]
+    }
+]
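
For reference, a minimal sketch of loading this example through the Story schema added below in App/Generate/Story/Story.py (the import path assumes the script runs from the repo root):

import json
from App.Generate.Story.Story import Story

# Load the example output and validate it against the Scene/Story schema.
with open("App/Generate/Prompts/Example.json") as f:
    data = json.load(f)

story = Story.from_dict(data)
print(len(story.scenes), "scenes,", len(story.all_images), "image prompts")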
App/Generate/Prompts/StoryGen.py
ADDED
@@ -0,0 +1,23 @@
+Prompt = """
+Generate a variety of high-quality prompts for Stable Diffusion XL (SDXL), the latest image generation model. With SDXL, you can create realistic images with improved face generation, and stunning art from shorter prompts, with this format: "[TYPE OF IMAGE] [SUBJECT] [ENVIRONMENT] [5 ACTION WORDS], [COMPOSITION], [MODIFIERS], [PHOTOGRAPHY TERMS], [ART MEDIUM], [ART STYLE], [ARTIST STYLE], [ENHANCEMENTS]"
+
+Let's break down the components of the composition format:
+
+[TYPE OF IMAGE]: Specifies the type of image being generated, such as photography, illustration, painting, etc.
+[SUBJECT]: The main focus or central element of the image.
+[ENVIRONMENT]: Describes the setting or backdrop in which the subject is placed.
+[5 ACTION WORDS]: Specifies the action or state of being of the subject, adding dynamism or emotion. NOT MORE THAN 5 WORDS.
+[COMPOSITION]: Refers to the arrangement or organization of elements within the image, providing guidance to the AI model on how to frame the scene.
+[MODIFIERS]: Additional elements that can enhance the composition, such as camera angles, perspectives, or spatial relationships.
+[PHOTOGRAPHY TERMS]: Describes elements related to photography, such as shot type, lighting, composition techniques, etc.
+[ART MEDIUM]: Specifies the medium or materials used in artistic expression, such as digital illustration, oil painting, etc.
+[ART STYLE]: Defines the overall artistic style or aesthetic of the image.
+[ARTIST STYLE]: Optionally, specifies a particular artist or artistic influence that informs the style or composition of the image.
+[ENHANCEMENTS]: Additional modifiers that enhance the image quality or provide specific details, such as HDR, vivid colors, etc.
+
+Remember, these parts are just placeholders to guide you!
+My JOB IS AT RISK HERE.
+Just make sure that the images are coherent and visually appealing, to keep the viewers engaged, and that the prompts are coherent, meaning they should go together.
+Make sure that the JSON keys are spelled correctly. Take a deep breath at every step for extra concentration.
+Now, create an engaging video using images and narration. The video should be about {topic}. The output should be JSON containing a list of objects with the keys "narration", which is the narration of the video during that scene, and "image_prompts", which is a list of SDXL prompts for that narration. You need to create an appropriate number of image_prompts, proportional to the length of the current object's narration. It should match so as to make the story good and engaging. Only output the JSON markdown.
+"""
App/Generate/Prompts/StoryGen.text
ADDED
@@ -0,0 +1,21 @@
+Generate a variety of high-quality prompts for Stable Diffusion XL (SDXL), the latest image generation model. With SDXL, you can create realistic images with improved face generation, and stunning art from shorter prompts, with this format: "[TYPE OF IMAGE] [SUBJECT] [ENVIRONMENT] [5 ACTION WORDS], [COMPOSITION], [MODIFIERS], [PHOTOGRAPHY TERMS], [ART MEDIUM], [ART STYLE], [ARTIST STYLE], [ENHANCEMENTS]"
+
+Let's break down the components of the composition format:
+
+[TYPE OF IMAGE]: Specifies the type of image being generated, such as photography, illustration, painting, etc.
+[SUBJECT]: The main focus or central element of the image.
+[ENVIRONMENT]: Describes the setting or backdrop in which the subject is placed.
+[5 ACTION WORDS]: Specifies the action or state of being of the subject, adding dynamism or emotion. NOT MORE THAN 5 WORDS.
+[COMPOSITION]: Refers to the arrangement or organization of elements within the image, providing guidance to the AI model on how to frame the scene.
+[MODIFIERS]: Additional elements that can enhance the composition, such as camera angles, perspectives, or spatial relationships.
+[PHOTOGRAPHY TERMS]: Describes elements related to photography, such as shot type, lighting, composition techniques, etc.
+[ART MEDIUM]: Specifies the medium or materials used in artistic expression, such as digital illustration, oil painting, etc.
+[ART STYLE]: Defines the overall artistic style or aesthetic of the image.
+[ARTIST STYLE]: Optionally, specifies a particular artist or artistic influence that informs the style or composition of the image.
+[ENHANCEMENTS]: Additional modifiers that enhance the image quality or provide specific details, such as HDR, vivid colors, etc.
+
+Remember, these parts are just placeholders to guide you!
+My JOB IS AT RISK HERE.
+Just make sure that the images are coherent and visually appealing, to keep the viewers engaged, and that the prompts are coherent, meaning they should go together.
+Make sure that the JSON keys are spelled correctly. Take a deep breath at every step for extra concentration.
+Now, create an engaging video using images and narration. The video should be about {topic}. The output should be JSON containing a list of objects with the keys "narration", which is the narration of the video during that scene, and "image_prompts", which is a list of SDXL prompts for that narration. You need to create an appropriate number of image_prompts, proportional to the length of the current object's narration. It should match so as to make the story good and engaging. Only output the JSON markdown.
App/Generate/Schema.py
ADDED
@@ -0,0 +1,7 @@
+from typing import List, Optional
+from pydantic import BaseModel, HttpUrl
+from pydantic import validator
+
+
+class GeneratorRequest(BaseModel):
+    prompt: str
App/Generate/Story/Story.py
ADDED
@@ -0,0 +1,55 @@
+import json
+from pydantic import BaseModel
+from typing import List
+
+
+class Scene(BaseModel):
+    narration: str
+    image_prompts: List[str]
+
+    def num_images(self):
+        return len(self.image_prompts)
+
+
+class Story(BaseModel):
+    scenes: List[Scene]
+
+    @classmethod
+    def from_dict(cls, data):
+        # Normalise the scene dictionaries: if a scene has "image_prompts"
+        # but the narration key was misspelled, rename the stray key.
+        json_data = data
+        for item in json_data:
+            if "image_prompts" in item and "narration" not in item:
+                for key in list(item.keys()):
+                    if key != "image_prompts":
+                        item["narration"] = item.pop(key)
+                        break
+
+        # Convert the dictionaries into a list of Scene objects
+        scenes = [Scene(**item) for item in json_data]
+        # Return a Story object with all scenes
+        return cls(scenes=scenes)
+
+    @classmethod
+    def parse_markdown(cls, markdown_str):
+        # Extract the JSON block from a ```json fenced markdown string;
+        # fall back to treating the whole string as JSON.
+        try:
+            json_str = markdown_str.split("```json")[1].split("```")[0].strip()
+        except IndexError:
+            json_str = markdown_str.strip()
+        return cls.from_dict(data=json.loads(json_str))
+
+    @property
+    def all_narrations(self):
+        return [scene.narration for scene in self.scenes]
+
+    @property
+    def all_images(self):
+        results = []
+        for scene in self.scenes:
+            results.extend(scene.image_prompts)
+        return results
+
+    def num_images_in_scene(self, scene_index):
+        return self.scenes[scene_index].num_images()
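
A quick sketch of the intended round trip, assuming a model reply wrapped in a ```json fence as the prompt requests:

from App.Generate.Story.Story import Story

reply = '```json\n[{"narration": "Hello!", "image_prompts": ["a sunrise, photograph"]}]\n```'

story = Story.parse_markdown(reply)
print(story.all_narrations)          # ['Hello!']
print(story.num_images_in_scene(0))  # 1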
App/Generate/database/DescriptAPI.py
ADDED
@@ -0,0 +1,196 @@
+import asyncio
+import aiohttp
+import os
+import uuid
+import wave
+from collections import deque
+from pydub import AudioSegment
+
+
+def concatenate_wave_files(input_file_paths, output_file_path):
+    """
+    Concatenates multiple wave files and saves the result to a new file.
+
+    :param input_file_paths: A list of paths to the input wave files.
+    :param output_file_path: The path to the output wave file.
+    """
+    # Check if input file paths are provided
+    if not input_file_paths:
+        raise ValueError("No input file paths provided.")
+
+    # Validate output file path
+    if not output_file_path:
+        raise ValueError("Output file path is empty.")
+
+    # Validate input file paths
+    for input_file_path in input_file_paths:
+        if not input_file_path:
+            raise ValueError("Empty input file path found.")
+
+    # Validate and get parameters from the first input file
+    with wave.open(input_file_paths[0], "rb") as input_file:
+        n_channels = input_file.getnchannels()
+        sampwidth = input_file.getsampwidth()
+        framerate = input_file.getframerate()
+        comptype = input_file.getcomptype()
+        compname = input_file.getcompname()
+
+    # Open the output file for writing
+    output_file = wave.open(output_file_path, "wb")
+    output_file.setnchannels(n_channels)
+    output_file.setsampwidth(sampwidth)
+    output_file.setframerate(framerate)
+    output_file.setcomptype(comptype, compname)
+
+    # Concatenate and write data from all input files to the output file
+    for input_file_path in input_file_paths:
+        with wave.open(input_file_path, "rb") as input_file:
+            output_file.writeframes(input_file.readframes(input_file.getnframes()))
+
+    # Close the output file
+    output_file.close()
+
+    print(
+        f"Successfully concatenated {len(input_file_paths)} files into {output_file_path}"
+    )
+
+
+# # Example usage
+# input_files = ["./tmp/" + i for i in os.listdir("./tmp")]
+# output_file = "./concatenated_output.wav"
+# concatenate_wave_files(input_files, output_file)
+
+
+def concatenate_wav_files(input_files, file_directory):
+    print(input_files)
+    output_file = file_directory + str(uuid.uuid4()) + "final.wav"
+    # Initialize variables for the output file
+    output = None
+    output_params = None
+
+    try:
+        # Open output file for writing
+        output = wave.open(output_file, "wb")
+
+        # Loop through input files
+        for input_file in input_files:
+            with wave.open(input_file, "rb") as input_wav:
+                # If this is the first input file, set output file parameters
+                if output_params is None:
+                    output_params = input_wav.getparams()
+                    output.setparams(output_params)
+                # The parameter-consistency check is intentionally disabled:
+                # elif input_wav.getparams() != output_params:
+                #     raise ValueError(
+                #         "Input file parameters do not match output file parameters."
+                #     )
+
+                # Read data from input file and write to output file
+                output.writeframes(input_wav.readframes(input_wav.getnframes()))
+    finally:
+        # Close output file
+        if output is not None:
+            output.close()
+    return output_file
+
+
+class Speak:
+    def __init__(self, api_url="https://yakova-embedding.hf.space", dir="./tmp"):
+        self.api_url = api_url
+        self.dir = dir
+
+    async def _make_request(self, method, endpoint, json=None):
+        async with aiohttp.ClientSession() as session:
+            async with getattr(session, method)(
+                f"{self.api_url}/{endpoint}", json=json
+            ) as response:
+                return await response.json()
+
+    async def say(self, text, speaker="Tabitha"):
+        data = {"text": text, "speaker": speaker}
+
+        response_data = await self._make_request("post", "descript_tts", json=data)
+        tts_id = response_data["id"]
+
+        # Poll the status endpoint until the TTS is ready
+        while True:
+            status_data = await self._make_request(
+                "post", "descript_status", json={"id": tts_id}
+            )
+            print(status_data)
+            if "status" in status_data and status_data["status"] == "done":
+                audio_url = status_data["url"]
+                temp = await self.download_file(audio_url)
+                return audio_url, temp
+
+            await asyncio.sleep(1)
+
+    async def download_file(self, url):
+        filename = str(uuid.uuid4()) + ".wav"
+        os.makedirs(self.dir, exist_ok=True)
+        save_path = os.path.join(self.dir, filename)
+        async with aiohttp.ClientSession() as session:
+            async with session.get(url) as response:
+                if response.status == 200:
+                    with open(save_path, "wb") as file:
+                        while True:
+                            chunk = await response.content.read(1024)
+                            if not chunk:
+                                break
+                            file.write(chunk)
+
+        return save_path
+
+
+async def process_narrations(narrations):
+    speak = Speak()
+    tasks = deque()
+    results = []
+    files = []
+
+    async def process_task():
+        while tasks:
+            text = tasks.popleft()
+            result = await speak.say(text)
+            _, temp = result
+            results.append(result)
+            files.append(temp)
+
+    for narration in narrations:
+        tasks.append(narration)
+        if len(tasks) >= 2:
+            # Drain the queue with two concurrent workers
+            await asyncio.gather(*[process_task() for _ in range(2)])
+
+    # Process remaining tasks
+    await asyncio.gather(*[process_task() for _ in range(len(tasks))])
+    concatenated_file = concatenate_wav_files(files, speak.dir)
+
+    wav_file = AudioSegment.from_file(concatenated_file, format="wav")
+    duration_in_seconds = int(len(wav_file) / 1000)
+
+    return results, (concatenated_file, duration_in_seconds)
+
+
+# # Example narrations
+# narrations = [
+#     "Welcome to a journey through some of history's strangest moments!",
+#     "Did you know that in ancient Rome, mustaches were a big deal?",
+# ]
+
+
+# # Run the asyncio event loop
+# async def main():
+#     results = await process_narrations(narrations)
+#     print("Results:", results)
+
+
+# asyncio.run(main())
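
The deque-plus-two-workers pattern above is how both this module and Vercel.py cap the number of in-flight requests at two. A sketch of the same idea with asyncio.Semaphore from the standard library (the helper name is illustrative, not part of the commit):

import asyncio

async def run_limited(coroutines, limit=2):
    # Cap the number of coroutines running at once, the same effect as
    # draining a deque with `limit` workers.
    semaphore = asyncio.Semaphore(limit)

    async def guarded(coro):
        async with semaphore:
            return await coro

    return await asyncio.gather(*(guarded(c) for c in coroutines))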
App/Generate/database/Model.py
ADDED
@@ -0,0 +1,301 @@
+import databases
+import orm
+import asyncio, os
+import uuid, random
+from pydub import AudioSegment
+from .DescriptAPI import Speak
+from .Vercel import AsyncImageGenerator
+import aiohttp
+from typing import List
+
+database_url = "sqlite+aiosqlite:///ok.db"
+database = databases.Database(database_url)
+models = orm.ModelRegistry(database=database)
+
+
+class Project(orm.Model):
+    tablename = "projects"
+    start = 0
+    registry = models
+    fields = {
+        "id": orm.Integer(primary_key=True),
+        "name": orm.String(max_length=10_000),
+        "aspect_ratio": orm.Float(allow_null=True, default=0),
+        "transcript": orm.JSON(allow_null=True, default=[]),
+        "duration": orm.Integer(allow_null=True, default=0),
+        "assets": orm.JSON(allow_null=True, default=[]),
+        "links": orm.JSON(allow_null=True, default=[]),
+        "constants": orm.JSON(allow_null=True, default={}),
+    }
+
+    """
+    Reference shapes for the asset entries this model produces:
+
+    assets.extend(
+        [
+            {"type": "video", "sequence": video_sequence},
+            {
+                "type": "audio",
+                "sequence": [
+                    {
+                        "type": "audio",
+                        "name": "transcript.wav",
+                        "start": trans_start,
+                        "end": trans_end,
+                        "props": {
+                            "startFrom": trans_start * 30,
+                            "endAt": trans_end * 30,
+                            "volume": 5,
+                        },
+                    },
+                ],
+            },
+            {
+                "type": "background",
+                "sequence": [
+                    {
+                        "type": "background",
+                        "name": "background.mp3",
+                        "start": trans_start,
+                        "end": trans_end,
+                        "props": {
+                            "startFrom": trans_start * 30,
+                            "endAt": trans_end * 30,
+                            "volume": 0.4,
+                        },
+                    },
+                ],
+            },
+        ]
+    )
+
+    {
+        "type": "image",
+        "name": file_name,
+        "start": image["start"],
+        "end": image["end"],
+    }
+    """
+
+    async def get_all_scenes(self):
+        return await Scene.objects.filter(project=self).all()
+
+    async def generate_json(self):
+        project_scenes: List[Scene] = await self.get_all_scenes()
+        self.links = []
+        self.assets = []
+        image_assets = []
+        video_assets = []
+        audio_assets = []
+
+        transitions = [
+            "WaveRight_transparent.webm",
+            "WaveLeft_transparent.webm",
+            # "WaveBlue_transparent.webm",
+            # "Wave_transparent.webm",
+            # "Swirl_transparent.webm",
+            # "Snow_transparent.webm",
+            # "Likes_transparent.webm",
+            # "Lightning_transparent.webm",
+            # "Happy_transparent.webm",
+            # "Fire_transparent.webm",
+            # "CurlingWave_transparent.webm",
+            # "Cloud_transparent.webm",
+        ]
+
+        self.links.append(
+            {
+                "file_name": "sfx_1.mp3",
+                "link": "https://dm0qx8t0i9gc9.cloudfront.net/previews/audio/BsTwCwBHBjzwub4i4/camera-shutter-05_MJn9CZV__NWM.mp3?type=preview&origin=AUDIOBLOCKS&timestamp_ms=1715270679690&publicKey=kUhrS9sKVrQMTvByQMAGMM0jwRbJ4s31HTPVkfDGmwGhYqzmWJHsjIw5fZCkI7ba&organizationId=105711&apiVersion=2.0&stockItemId=2248&resolution=&endUserId=414d29f16694d76c58e7998200a8dcf6f28dc165&projectId=f734c6d7-e39d-4c1d-8f41-417f94cd37ce&searchId=4b01b35a-fafc-45fb-9f40-e98849cb71ac&searchPageId=f24f4c5b-9976-4fd3-9bac-d217d87c723d",
+            }
+        )
+        for scene in project_scenes:
+            _, file_name = os.path.split(scene.narration_path)
+            self.duration += scene.narration_duration + 1  ## added one for spaces
+            self.links.append({"file_name": file_name, "link": scene.narration_link})
+
+            # narration
+            audio_assets.append(
+                {
+                    "type": "audio",
+                    "name": file_name,
+                    "start": self.start,
+                    "end": self.start + scene.narration_duration + 1,
+                    "props": {
+                        "startFrom": 0,
+                        "endAt": scene.narration_duration * 30,
+                        "volume": 5,
+                    },
+                }
+            )
+
+            ## images and transitions
+            for image in scene.images:
+                file_name = str(uuid.uuid4()) + ".png"
+                self.links.append({"file_name": file_name, "link": image})
+                image_assets.append(
+                    {
+                        "type": "image",
+                        "name": file_name,
+                        "start": self.start,
+                        "end": self.start + scene.image_duration,
+                    }
+                )
+                self.start = self.start + scene.image_duration
+
+            ## transitions between images
+            video_assets.append(
+                {
+                    "type": "video",
+                    "name": "Effects/" + random.choice(transitions),
+                    "start": self.start - 1,
+                    "end": self.start + 2,
+                    "props": {
+                        "startFrom": 1 * 30,
+                        "endAt": 3 * 30,
+                        "volume": 0,
+                    },
+                }
+            )
+
+        self.assets.append({"type": "audio", "sequence": audio_assets})
+        ## add the images to assets
+        self.assets.append({"type": "image", "sequence": image_assets})
+        self.assets.append(
+            {"type": "video", "sequence": video_assets},
+        )
+        self.constants = {
+            "duration": self.duration * 30,
+            "height": 1920,
+            "width": 1080,
+        }
+
+        await self.update(**self.__dict__)
+        return {"links": self.links, "assets": self.assets, "constants": self.constants}
+
+    async def generate_transcript(self):
+        pass
+
+
+class Scene(orm.Model):
+    tts = Speak()
+    tablename = "scenes"
+    registry = models
+    fields = {
+        "id": orm.Integer(primary_key=True),
+        "project": orm.ForeignKey(Project),
+        "images": orm.JSON(default=None),
+        "narration": orm.String(max_length=10_000, allow_null=True, default=""),
+        "image_prompts": orm.JSON(default=None),
+        "narration_duration": orm.Float(allow_null=True, default=0),
+        "image_duration": orm.Float(allow_null=True, default=0),
+        "narration_path": orm.String(
+            max_length=100,
+            allow_null=True,
+            default="",
+        ),
+        "narration_link": orm.String(max_length=10_000, allow_null=True, default=""),
+    }
+
+    async def generate_scene_data(self):
+        # Run narrate() and generate_images() concurrently
+        await asyncio.gather(self.narrate(), self.generate_images())
+        self.calculate_durations()
+
+    async def narrate(self):
+        link, path = await self._retry_narration_generation()
+        self.narration_path = path
+        self.narration_link = link
+
+    async def _retry_narration_generation(self):
+        retry_count = 0
+        while retry_count < 3:
+            try:
+                return await self.tts.say(text=self.narration)
+            except Exception as e:
+                print(f"Failed to generate narration: {e}")
+                retry_count += 1
+                await asyncio.sleep(1)  # Add delay before retrying
+
+        print("Failed to generate narration after 3 attempts.")
+
+    def calculate_durations(self):
+        wav_file = AudioSegment.from_file(self.narration_path, format="wav")
+        self.narration_duration = int(len(wav_file) / 1000)
+        self.image_duration = self.narration_duration / len(self.image_prompts)
+
+    async def generate_images(self):
+        self.images = []
+        async with aiohttp.ClientSession() as session:
+            image_generator = AsyncImageGenerator(session)
+            for payload in self.image_prompts:
+                result = await image_generator.generate_image(payload)
+                status = await image_generator.fetch_image_status(result["id"])
+                self.images.extend(status["output"])
+
+
+class Transition(orm.Model):
+    tablename = "transitions"
+    registry = models
+    fields = {
+        "id": orm.Integer(primary_key=True),
+        "name": orm.String(max_length=100),
+        "file_path": orm.String(max_length=100),
+    }
+
+
+class BackgroundMusic(orm.Model):
+    tablename = "background_music"
+    registry = models
+    fields = {
+        "id": orm.Integer(primary_key=True),
+        "name": orm.String(max_length=100),
+        "file_path": orm.String(max_length=100),
+    }
+
+
+# class Testy(orm.Model):
+#     tablename = "asd"
+#     registry = models
+#     fields = {
+#         "id": orm.Integer(primary_key=True),
+#         "duration": orm.Float(allow_null=True, default=None),
+#         "area": orm.Float(allow_null=True, default=None),
+#         "radius": orm.Float(allow_null=True, default=None),
+#     }
+
+#     def calculate_durations(self):
+#         self.area = self.radius**2 * 3.14
+#         pass
+
+
+# # Create the tables
+async def create_tables():
+    datas = {
+        "narration": "Welcome to a journey through some of history's strangest moments! Get ready to explore the bizarre, the unusual, and the downright weird.",
+        "image_prompts": [
+            "Vintage book opening, revealing strange facts, mixed media collage, curious and intriguing, mysterious, eccentric, macro lens, soft lighting, conceptual photography, cross-processed film, surreal, warm tones, textured paper."
+        ],
+    }
+
+    await models._create_all(database_url)
+    x = await Project.objects.create(name="avatar")
+    scene = await Scene.objects.create(project=x)
+    scene.narration = datas["narration"]
+    scene.image_prompts = datas["image_prompts"]
+
+    await scene.generate_scene_data()
+    await scene.update(**scene.__dict__)
+    p = await x.get_all_scenes()
+    print(p)
+    print(scene.__dict__)
+
+
+# asyncio.run(create_tables())
+# # Run the function to create tables
+# await create_tables()
+
+# # Example usage:
+# await Note.objects.create(text="Buy the groceries.", completed=False)
+# note = await Note.objects.get(id=1)
+# print(note)
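
Putting the model layer together, a minimal sketch of the intended flow; the topic and prompt strings are made up, and running it requires the external TTS and image endpoints to be reachable:

import asyncio
from App.Generate.database.Model import models, database_url, Project, Scene

async def demo():
    await models._create_all(database_url)
    project = await Project.objects.create(name="demo")
    scene = await Scene.objects.create(project=project)
    scene.narration = "A one-line narration."
    scene.image_prompts = ["a lighthouse at dusk, photograph"]
    await scene.generate_scene_data()          # TTS + image generation
    await scene.update(**scene.__dict__)
    payload = await project.generate_json()    # links/assets/constants for the renderer
    print(payload["constants"])

asyncio.run(demo())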
App/Generate/database/Test.py
ADDED
@@ -0,0 +1,102 @@
+import aiohttp
+import asyncio
+import json
+import random
+import secrets
+
+
+async def generate_with_playground(prompt, resolution):
+    negative_prompt = "text, watermark, blurry, haze, low contrast, low quality, underexposed, ugly, deformed, boring, bad quality, cartoon, ((disfigured)), ((bad art)), ((deformed)), ((poorly drawn)), ((extra limbs)), ((close up)), ((b&w)), weird colors, blurry, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, low detail, low quality, double face, 2 faces, cropped, ugly, low-res, tiling, grainy, cropped, ostentatious, ugly, oversaturated, grain, low resolution, disfigured, blurry, bad anatomy, disfigured, poorly drawn face, mutant, mutated, extra limb, ugly, poorly drawn hands, missing limbs, blurred, floating limbs, disjointed limbs, deformed hands, blurred, out of focus, long neck, long body, ugly, disgusting, childish, cut off cropped, distorted, imperfect, surreal, bad hands, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, Lots of hands, extra limbs, extra fingers, conjoined fingers, deformed fingers, old, ugly eyes, imperfect eyes, skewed eyes, unnatural face, stiff face, stiff body, unbalanced body, unnatural body, lacking body, details are not clear, cluttered, details are sticky, details are low, distorted details, ugly hands, imperfect hands, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2) bad hands, fused hand, missing hand, disappearing arms, hands, disappearing thigh, disappearing calf, disappearing legs, ui, missing fingers"
+    width, height = None, None
+    if resolution == "Square":
+        width = 1024
+        height = 1024
+    elif resolution == "Wide":
+        width = 1280
+        height = 768
+    elif resolution == "Portrait":
+        width = 768
+        height = 1280
+
+    session_hash = generate_session_hash()
+    random_digit = generate_random_digits()
+    root_url = "https://playgroundai-playground-v2-5.hf.space/--replicas/bdj8s"
+
+    url_join_queue = f"https://playgroundai-playground-v2-5.hf.space/queue/join?fn_index=3&session_hash={session_hash}"
+    headers = {"Content-Type": "application/json"}
+    async with aiohttp.ClientSession() as session:
+        async with session.get(url_join_queue) as resp:
+            async for line in resp.content:
+                temp = line.decode().replace("data: ", "")
+                print(temp)
+                try:
+                    data = json.loads(temp)
+                except json.JSONDecodeError:
+                    continue
+                if data["msg"] == "send_data":
+                    event_id = data.get("event_id")
+                    await session.post(
+                        "https://playgroundai-playground-v2-5.hf.space/queue/data",
+                        json={
+                            "data": [
+                                prompt,
+                                negative_prompt,
+                                True,
+                                random_digit,
+                                width,
+                                height,
+                                3,
+                                True,
+                            ],
+                            "event_data": None,
+                            "fn_index": 3,
+                            "trigger_id": 6,
+                            "session_hash": session_hash,
+                            "event_id": event_id,
+                        },
+                    )
+                elif data["msg"] == "process_completed":
+                    image_paths = data["output"]["data"][0]
+                    print(image_paths)
+                    first_image_path = (
+                        image_paths[0]["image"]["path"] if image_paths else None
+                    )
+                    if first_image_path:
+                        print(first_image_path)
+                        full_url = f"{root_url}/file={first_image_path}"
+                        return {
+                            "images": [{"url": full_url}],
+                            "modelUsed": "Playground",
+                        }
+                    else:
+                        raise ValueError(
+                            "No image path found in the process_completed message."
+                        )
+
+
+async def fetch_and_extract_root_url(url):
+    async with aiohttp.ClientSession() as session:
+        async with session.get(url) as resp:
+            return await resp.text()
+
+
+def generate_session_hash():
+    chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
+    return "".join(secrets.choice(chars) for _ in range(5))
+
+
+def generate_random_digits():
+    return random.randint(100000000, 999999999)
+
+
+async def main():
+    prompt = "COMICBOOK ILLUSTRATION Medea, Jason, two sons, black background, 8k"
+    resolution = "Square"
+    result = await generate_with_playground(prompt, resolution)
+    print(result)
+
+
+# Run the main function
+asyncio.run(main())
App/Generate/database/Vercel.py
ADDED
@@ -0,0 +1,104 @@
+import aiohttp, asyncio, pprint
+from collections import deque
+
+
+class AsyncImageGenerator:
+    def __init__(self, session):
+        self.session = session
+        self.base = "https://auto-svg.vercel.app"
+
+    async def generate_image(self, payload, max_retries=50):
+        retries = 0
+        while retries < max_retries:
+            try:
+                url = f"{self.base}/predictions"
+                data = {
+                    "input": {
+                        "width": 1024,
+                        "height": 1024,
+                        "prompt": payload,
+                        "scheduler": "DPMSolver++",
+                        "num_outputs": 1,
+                        "guidance_scale": 2,
+                        # "negative_prompt": "ugly, deformed, noisy, blurry, distorted, out of focus, bad anatomy, extra limbs, poorly drawn face, poorly drawn hands, missing fingers, color, 3D, 2D, video game, cgi, plastic, fake, artificial, smooth",
+                        "negative_prompt": "text, watermark, blurry, haze, low contrast, low quality, underexposed, ugly, deformed, boring, bad quality, cartoon, ((disfigured)), ((bad art)), ((deformed)), ((poorly drawn)), ((extra limbs)), ((close up)), ((b&w)), weird colors, blurry, ugly, tiling, poorly drawn hands, poorly drawn feet, poorly drawn face, out of frame, extra limbs, disfigured, body out of frame, blurry, bad anatomy, blurred, watermark, grainy, signature, cut off, draft, low detail, low quality, double face, 2 faces, cropped, ugly, low-res, tiling, grainy, cropped, ostentatious, ugly, oversaturated, grain, low resolution, disfigured, blurry, bad anatomy, disfigured, poorly drawn face, mutant, mutated, extra limb, ugly, poorly drawn hands, missing limbs, blurred, floating limbs, disjointed limbs, deformed hands, blurred, out of focus, long neck, long body, ugly, disgusting, childish, cut off cropped, distorted, imperfect, surreal, bad hands, text, error, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry, artist name, Lots of hands, extra limbs, extra fingers, conjoined fingers, deformed fingers, old, ugly eyes, imperfect eyes, skewed eyes, unnatural face, stiff face, stiff body, unbalanced body, unnatural body, lacking body, details are not clear, cluttered, details are sticky, details are low, distorted details, ugly hands, imperfect hands, (mutated hands and fingers:1.5), (long body :1.3), (mutation, poorly drawn :1.2) bad hands, fused hand, missing hand, disappearing arms, hands, disappearing thigh, disappearing calf, disappearing legs, ui, missing fingers",
+                        "num_inference_steps": 25,
+                    },
+                    "path": "models/playgroundai/playground-v2.5-1024px-aesthetic/versions/a45f82a1382bed5c7aeb861dac7c7d191b0fdf74d8d57c4a0e6ed7d4d0bf7d24",
+                }
+
+                async with self.session.post(url, json=data) as response:
+                    response.raise_for_status()
+                    return await response.json()
+            except aiohttp.ClientResponseError as e:
+                if e.status == 500:
+                    retries += 1
+                    print(f"Retry {retries} after 500 error")
+                    await asyncio.sleep(1)  # Add a delay before retrying
+                else:
+                    raise e
+
+        # If max retries reached, raise an exception
+        raise Exception("Max retries reached")
+
+    async def fetch_image_status(self, image_id):
+        # Poll until the prediction succeeds
+        status = await self._fetch_image_status(image_id)
+        while status["status"] != "succeeded":
+            status = await self._fetch_image_status(image_id)
+            await asyncio.sleep(3)
+        return status
+
+    async def _fetch_image_status(self, image_id):
+        url = f"https://replicate.com/api/predictions/{image_id}"
+        async with self.session.get(url) as response:
+            response.raise_for_status()
+            return await response.json()
+
+
+async def process_images(payloads):
+    async with aiohttp.ClientSession() as session:
+        image_generator = AsyncImageGenerator(session)
+        tasks = deque()
+        results = []
+
+        async def process_task():
+            while tasks:
+                payload = tasks.popleft()
+                result = await image_generator.generate_image(payload)
+                status = await image_generator.fetch_image_status(result["id"])
+                print(status["output"])
+                results.extend(status["output"])
+
+        for payload in payloads:
+            tasks.append(payload)
+            if len(tasks) >= 2:
+                await asyncio.gather(*[process_task() for _ in range(2)])
+
+        # Process remaining tasks
+        await asyncio.gather(*[process_task() for _ in range(len(tasks))])
+
+        return results
+
+
+# # Example payloads
+# payloads = [
+#     """
+#     Roman gladiator fighting a crocodile in the Colosseum, ancient Roman entertainment, oil painting, gruesome and thrilling, blood-stained sand, telephoto lens, afternoon golden hour lighting, cinematic film style, realistic with dramatic shadows
+#     """
+# ]
+
+
+# # Run the asyncio event loop
+# async def main():
+#     results = await process_images(payloads)
+#     pprint.pprint(results)
+
+
+# asyncio.run(main())
App/Generate/generatorRoutes.py
ADDED
@@ -0,0 +1,63 @@
+from fastapi import APIRouter, BackgroundTasks
+from .Schema import GeneratorRequest
+from .utils.GroqInstruct import chatbot
+from .Story.Story import Story
+import asyncio
+from tqdm import tqdm
+from .database.Model import models, database_url, Scene, Project
+from .utils.RenderVideo import RenderVideo
+from .Prompts.StoryGen import Prompt
+
+
+async def update_scene(model_scene):
+    await model_scene.generate_scene_data()
+    await model_scene.update(**model_scene.__dict__)
+
+
+async def main(request: GeneratorRequest):
+    topic = request.prompt
+    renderer = RenderVideo()
+
+    await models._create_all(database_url)
+    message = chatbot(Prompt.format(topic=topic))
+
+    generated_story = Story.from_dict(message["scenes"])
+
+    print("Generated Story ✅")
+
+    project = await Project.objects.create(name=topic[0:100])
+
+    # Generate scene assets two scenes at a time
+    with tqdm(total=len(generated_story.scenes)) as pbar:
+        for i in range(0, len(generated_story.scenes), 2):
+            batch = generated_story.scenes[i : i + 2]  # Get a batch of two story scenes
+            batch_updates = []
+
+            for story_scene in batch:
+                model_scene = await Scene.objects.create(project=project)
+                model_scene.image_prompts = story_scene.image_prompts
+                model_scene.narration = story_scene.narration
+                await model_scene.update(**model_scene.__dict__)
+                batch_updates.append(
+                    update_scene(model_scene)
+                )  # Append update coroutine to batch_updates
+            await asyncio.gather(*batch_updates)  # Await update coroutines for this batch
+            pbar.update(len(batch))  # Increment progress bar by the size of the batch
+
+    temp = await project.generate_json()
+    await renderer.render_video(temp)
+
+
+generator_router = APIRouter(tags=["video-Generator"])
+
+
+@generator_router.post("/generate_video")
+async def generate_video(
+    videoRequest: GeneratorRequest, background_task: BackgroundTasks
+):
+    background_task.add_task(main, videoRequest)
+    return {"task_id": "started"}
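
A minimal client-side sketch of kicking off a generation, assuming the API is served locally on port 8000:

import asyncio
import aiohttp

async def start_generation():
    async with aiohttp.ClientSession() as session:
        # GeneratorRequest has a single "prompt" field (see Schema.py)
        async with session.post(
            "http://localhost:8000/generate_video",
            json={"prompt": "weird history facts"},
        ) as resp:
            print(await resp.json())  # {"task_id": "started"}

asyncio.run(start_generation())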
App/Generate/utils/Bing.py
ADDED
@@ -0,0 +1,53 @@
+import aiohttp
+import json
+import re
+
+
+class Bing:
+    def __init__(self):
+        self.url = "https://api.groq.com/openai/v1/chat/completions"
+        self.headers = {
+            "Authorization": "Bearer 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",
+            "accept": "application/json",
+            "content-type": "application/json",
+        }
+
+    @staticmethod
+    def remove_links(text):
+        # Remove links
+        cleaned_text = re.sub(r"\[\d+\]:\shttps?://\S+\s?\"\"", "", text)
+
+        # Remove other weird characters
+        # cleaned_text = re.sub(r'[^a-zA-Z0-9\s.,?!-]', '', cleaned_text)
+
+        return cleaned_text
+
+    async def chat(self, message, remove_links=False, content=False):
+        messages = [
+            {"role": "user", "content": message},
+            {"role": "assistant", "content": "You are an amazing Youtuber"},
+            # Add more messages as needed
+        ]
+
+        payload = {
+            "messages": messages,
+            "stream": False,
+            "model": "mixtral-8x7b-32768",
+            "temperature": 0.5,
+            "presence_penalty": 0,
+            "frequency_penalty": 0,
+            "top_p": 1,
+        }
+
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                self.url, headers=self.headers, data=json.dumps(payload)
+            ) as response:
+                result = await response.json()
+                # print(result)
+                # result = json.loads(result)
+                assistant_message_content = result["choices"][0]["message"]["content"]
+                # print(assistant_message_content)
+                if remove_links:
+                    return self.remove_links(assistant_message_content)
+                return assistant_message_content
ADDED
@@ -0,0 +1,77 @@
|
+import instructor
+from groq import Groq
+
+from typing import List
+from pydantic import BaseModel
+
+
+class Scene(BaseModel):
+    narration: str
+    image_prompts: List[str]
+
+
+class VideoOutput(BaseModel):
+    scenes: List[Scene]
+
+
+client = Groq(api_key="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")
+
+# By default, the patch function will patch the ChatCompletion.create method to support the response_model parameter
+client = instructor.from_groq(client, mode=instructor.Mode.JSON)
+
+
+# Now, we can use the response_model parameter using only a base model
+# rather than having to use the OpenAISchema class
+
+
+def chatbot(prompt):
+    response: VideoOutput = client.chat.completions.create(
+        model="mixtral-8x7b-32768",
+        # model="gemma-7b-it",
+        # model="llama2-70b-4096",
+        # model="llama3-70b-8192",
+        max_tokens=5000,
+        response_model=VideoOutput,
+        # kwargs={
+        #     # "temperature": 1,
+        #     "max_tokens": 5000,
+        #     # "top_p": 1,
+        #     "stream": False,
+        #     "stop": None,
+        # },
+        messages=[
+            {
+                "role": "system",
+                "content": """Take a deep breath. You are an amazing storyteller; you keep your audience engaged. Here is an example of one of your stories:
+Title: Why are Jews so rich
+it starts in
+medieval Europe the church and Islamic
+law both prohibit money lending but not
+Jews they loan money and interest makes
+them very wealthy so wealthy that even
+powerful monarchs borrow from them by
+the 17th century they become key members
+of Royal courts known as Court Jews
+financial advisers to Kings and Queens
+when the world transitioned from
+monarchy to democracy Jewish people with
+their centuries of experience were the
+first to take advantage of new banking
+infrastructures today however the world
+is very different Muslims Christians
+Jews everyone enjoys interest
+
+""",
+            },
+            {
+                "role": "user",
+                "content": prompt,
+            },
+        ],
+    )
+    return response.dict()
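
A sketch of the structured-output call this enables, mirroring how generatorRoutes.py uses it:

from App.Generate.utils.GroqInstruct import chatbot
from App.Generate.Prompts.StoryGen import Prompt

result = chatbot(Prompt.format(topic="weird history facts"))
# instructor validates the reply against VideoOutput, so this is a plain dict:
for scene in result["scenes"]:
    print(scene["narration"], len(scene["image_prompts"]))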
App/Generate/utils/HuggingChat.py
ADDED
@@ -0,0 +1,36 @@
+from hugchat import hugchat
+from hugchat.login import Login
+
+
+email = "mihorag447@vikinoko.com"
+passwd = "IamstewpedW!23@"
+# Log in to huggingface and grant authorization to huggingchat
+sign = Login(email, passwd)
+cookies = sign.login()
+
+# # Save cookies to the local directory
+cookie_path_dir = "./cookies_snapshot"
+sign.saveCookiesToDir(cookie_path_dir)
+
+# Load cookies when you restart your program:
+# sign = login(email, None)
+cookies = sign.loadCookiesFromDir(cookie_path_dir)
+
+
+# Create a ChatBot
+chatbot = hugchat.ChatBot(
+    cookies=cookies.get_dict(),
+    system_prompt="You are an amazing youtuber. A true creative master genius",
+)
+
+model_index = 0
+models = chatbot.get_available_llm_models()
+print(chatbot.active_model)
+if not chatbot.active_model.name == "CohereForAI/c4ai-command-r-plus":
+    for model in models:
+        print(model.name, "switching..")
+        if model.name == "CohereForAI/c4ai-command-r-plus":
+            model_index = models.index(model)
+            break
+    print(chatbot.current_conversation.system_prompt)
+    chatbot.switch_llm(model_index)
App/Generate/utils/RenderVideo.py
ADDED
@@ -0,0 +1,24 @@
+import aiohttp, json
+
+
+class RenderVideo:
+    def __init__(self, url="https://yakova-rectifier.hf.space/create-video"):
+        self.url = url
+
+    @staticmethod
+    def _parse_response(response: str) -> dict:
+        return json.loads(response)
+
+    async def post_request(self, data: dict) -> dict:
+        headers = {"Accept": "application/json", "Content-Type": "application/json"}
+        async with aiohttp.ClientSession() as session:
+            async with session.post(
+                self.url, data=json.dumps(data), headers=headers
+            ) as resp:
+                response = await resp.text()
+                result = self._parse_response(response)
+                return result
+
+    async def render_video(self, data) -> dict:
+        result = await self.post_request(data)
+        return result
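
The payload render_video expects is the dict Project.generate_json returns; a minimal hand-built example (file names and links are illustrative):

import asyncio
from App.Generate.utils.RenderVideo import RenderVideo

payload = {
    "links": [{"file_name": "narration_0.wav", "link": "https://example.com/narration_0.wav"}],
    "assets": [
        {
            "type": "audio",
            "sequence": [
                {"type": "audio", "name": "narration_0.wav", "start": 0, "end": 5,
                 "props": {"startFrom": 0, "endAt": 150, "volume": 5}}
            ],
        },
    ],
    "constants": {"duration": 150, "height": 1920, "width": 1080},  # frames at 30 fps
}

asyncio.run(RenderVideo().render_video(payload))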
App/Generate/utils/VideoEditor.py
ADDED
@@ -0,0 +1,90 @@
+import wave
+from pydantic import BaseModel, Field, validator
+from typing import List, Dict
+
+
+transitions = [
+    "WaveRight_transparent.webm",
+    "WaveLeft_transparent.webm",
+    "WaveBlue_transparent.webm",
+    "Wave_transparent.webm",
+    "Swirl_transparent.webm",
+    "Snow_transparent.webm",
+    "Likes_transparent.webm",
+    "Lightning_transparent.webm",
+    "Happy_transparent.webm",
+    "Fire_transparent.webm",
+    "CurlingWave_transparent.webm",
+    "Cloud_transparent.webm",
+]
+
+
+class Asset(BaseModel):
+    type: str
+    sequence: List[Dict]
+
+    @validator("sequence")
+    def check_duration(cls, sequence):
+        for item in sequence:
+            start = item.get("start", 0)
+            end = item.get("end", 0)
+            duration = (end - start) * 30  # Assuming 30 fps
+            if duration <= 1:
+                raise ValueError("Asset duration must be greater than 1 frame.")
+        return sequence
+
+
+class Link(BaseModel):
+    file_name: str
+    link: str
+
+
+class Remotion(BaseModel):
+    links: List[Link] = []
+    assets: List[Asset] = []
+    constants: Dict[str, int] = {"duration": 3840, "height": 1920, "width": 1080}
+    scenes: int
+    total_duration: int
+
+    def generate_image_links(self):
+        for i in range(self.scenes):
+            image_file_name = f"image_{i}.png"
+            image_link = f"https://image.lexica.art/full_webp/{i}.png"
+            self.links.append(Link(file_name=image_file_name, link=image_link))
+
+
+def concatenate_wav_files(input_files, output_file):
+    # Initialize variables for the output file
+    output = None
+    output_params = None
+    try:
+        # Open output file for writing
+        output = wave.open(output_file, "wb")
+
+        # Loop through input files
+        for input_file in input_files:
+            with wave.open(input_file, "rb") as input_wav:
+                # If this is the first input file, set output file parameters
+                if output_params is None:
+                    output_params = input_wav.getparams()
+                    output.setparams(output_params)
+                # Otherwise, ensure consistency of parameters
+                elif input_wav.getparams() != output_params:
+                    raise ValueError(
+                        "Input file parameters do not match output file parameters."
+                    )
+
+                # Read data from input file and write to output file
+                output.writeframes(input_wav.readframes(input_wav.getnframes()))
+    finally:
+        # Close output file
+        if output is not None:
+            output.close()
+
+
+# # Example usage
+# input_files = ["file1.wav", "file2.wav", "file3.wav"]
+# output_file = "output.wav"
+# concatenate_wav_files(input_files, output_file)
App/app.py
CHANGED
@@ -2,6 +2,7 @@ from fastapi import FastAPI, BackgroundTasks
 from .Editor.editorRoutes import videditor_router
 from App import bot
 from App.utilis import WorkerClient, SERVER_STATE
+from .Generate.generatorRoutes import generator_router
 
 app = FastAPI()
 manager = WorkerClient()
@@ -22,3 +23,4 @@ def read_root():
 
 
 app.include_router(videditor_router)
+app.include_router(generator_router)
Remotion-app/JsonMaker.py
ADDED
@@ -0,0 +1,29 @@
+from pydantic import BaseModel
+from typing import List, Optional
+from pydantic import validator
+import os, json
+
+
+class Assets(BaseModel):
+    type: str
+    sequence: List[dict]
+
+    @validator("type")
+    def valid_type(cls, v):
+        if v not in ["video", "audio", "text", "image", "sfx", "background"]:
+            raise ValueError("Invalid asset type")
+        return v
+
+
+def CreateFiles(assets: List[Assets], asset_dir: str):
+    for asset in assets:
+        filename = f"{asset.type.capitalize()}Sequences.json"
+        # Convert dictionary to JSON string
+        json_string = json.dumps(asset.sequence)
+
+        # Create directory if it doesn't exist
+        os.makedirs(asset_dir, exist_ok=True)
+        print(os.path.join(asset_dir, filename))
+        # Write JSON string to file
+        with open(os.path.join(asset_dir, filename), "w") as f:
+            f.write(json_string)
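
A sketch of writing the per-type sequence files this helper produces, run from inside Remotion-app; the target directory name is illustrative:

from JsonMaker import Assets, CreateFiles

assets = [
    Assets(type="image", sequence=[{"type": "image", "name": "image_0.png", "start": 0, "end": 5}]),
    Assets(type="audio", sequence=[{"type": "audio", "name": "narration_0.wav", "start": 0, "end": 5}]),
]

# Produces ImageSequences.json and AudioSequences.json in the given directory
CreateFiles(assets, "src/Assets")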
requirements.txt
CHANGED
@@ -20,4 +20,8 @@ asgiref
 pipx
 cryptg
 aiohttp
-
+instructor==1.2.0
+groq==0.5.0
+python-multipart
+orm[aiosqlite]
+pydub