# video2 / app.py
# Hugging Face Space by lalashechka — commit bbc3e9e ("Update app.py", 7.62 kB).
# (Header reconstructed from raw-page residue so the file parses as Python.)
import base64
import io
import json
import os
import random
import re
import tempfile
import time
from contextlib import closing

import requests
from PIL import Image
from deep_translator import GoogleTranslator
from langdetect import detect
from websocket import create_connection

import gradio as gr
def animate_img(encoded_string):
    """Animate a still image via the stable-video-diffusion.com service.

    Uploads the image file at path *encoded_string*, then polls the result
    page every 10 s until generation finishes, and returns the URL of the
    first generated .mp4 (or None if no video URL is found in the page).

    Args:
        encoded_string: filesystem path of the source image (Gradio
            ``type='filepath'`` value).

    Returns:
        str | None: URL of the generated video, or None when the result
        page contains no video link.
    """
    # Use a context manager so the file handle is closed (original leaked it).
    with open(encoded_string, 'rb') as image_file:
        r = requests.post("https://stable-video-diffusion.com/api/upload",
                          files={"file": image_file})
    print(r.text)
    hash_ = r.json()['hash']
    time.sleep(10)
    while True:
        r2 = requests.get(f"https://stable-video-diffusion.com/result?hash={hash_}")
        page = r2.text
        # The result page shows this banner while the job is still running.
        if "Generation has been in progress for" in page:
            time.sleep(10)
            continue
        # Job finished: scrape the generated video URL(s) out of the HTML.
        # NOTE: `re` was used here without being imported in the original —
        # fixed in the file-level import block.
        pattern = r'https://storage.stable-video-diffusion.com/([a-f0-9]{32})\.mp4'
        matches = re.findall(pattern, page)
        sd_video = [f"https://storage.stable-video-diffusion.com/{m}.mp4"
                    for m in matches]
        # Guard the empty case (original indexed [0] unconditionally and
        # would raise IndexError on a malformed result page).
        if not sd_video:
            return None
        print(sd_video[0])
        return sd_video[0]
def flip_text(prompt, motion):
    """Generate a video from a text prompt via the configured backend.

    Russian prompts are translated to English first; the Russian motion
    label from the UI dropdown is mapped to the backend's motion keyword.
    Submits the task to ``url_video_g``, polls ``url_video_c`` every 3 s,
    and once the check endpoint returns raw video bytes instead of a JSON
    status, streams them into a temp .mp4 file.

    Args:
        prompt: free-text video description (any language).
        motion: camera-motion label from the dropdown (Russian UI strings).

    Returns:
        str | None: path of the downloaded .mp4, or None if the final
        download fails.
    """
    try:
        if detect(prompt) == 'ru':
            prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(prompt)
    except Exception:
        # Original fallback: any detect/translate failure degrades to a
        # generic prompt rather than aborting.
        prompt = 'video'
    url_video_g = os.getenv("url_video_g")
    url_video_c = os.getenv("url_video_c")
    # UI label (Russian) -> backend motion keyword; unknown labels pass through.
    motion_map = {
        "Приближение →←": 'zoom in',
        "Отдаление ←→": 'zoom out',
        "Вверх ↑": 'up',
        "Вниз ↓": 'down',
        "Влево ←": 'left',
        "Вправо →": 'right',
        "По часовой стрелке ⟳": 'rotate cw',
        "Против часовой стрелки ⟲": 'rotate ccw',
    }
    motion = motion_map.get(motion, motion)
    # "image": "null" is the literal string the backend expects for
    # text-only generation (see flip_text2 for the image variant).
    data = {"prompt": f"{prompt}", "image": "null", "denoise": 0.75, "motion": motion}
    r = requests.post(f"{url_video_g}", json=data)
    while True:
        data2 = {"task_id": f"{r.json()['task_id']}"}
        r2 = requests.post(f"{url_video_c}", json=data2)
        time.sleep(3)
        try:
            # While the task is pending, the check endpoint answers with JSON.
            if r2.json()['status'] in ("QUEUED", "PROCESSING"):
                continue
        except (ValueError, KeyError):
            # json() raises ValueError (JSONDecodeError) once the response is
            # the raw video; KeyError if 'status' is absent. Narrowed from the
            # original bare `except:`.
            try:
                n_im2 = f"{time.time()}"
                with tempfile.NamedTemporaryFile(prefix=f'aaafff{n_im2}',
                                                 suffix='.mp4',
                                                 delete=False) as file:
                    for chunk in r2.iter_content(chunk_size=1024):
                        if chunk:
                            file.write(chunk)
                return file.name
            except Exception as e:
                print(e)
                break
def flip_text2(encoded_string, prompt, motion):
    """Animate an uploaded image (with optional text prompt) via the backend.

    Like ``flip_text`` but sends the source image as base64 in the request.
    Russian prompts are translated to English; the Russian motion label is
    mapped to the backend keyword. Polls ``url_video_c`` every 3 s until the
    response is raw video bytes, then writes them to a temp .mp4.

    Args:
        encoded_string: filesystem path of the source image.
        prompt: optional video description (any language).
        motion: camera-motion label from the dropdown (Russian UI strings).

    Returns:
        str | None: path of the downloaded .mp4, or None if the final
        download fails.
    """
    url_video_g = os.getenv("url_video_g")
    url_video_c = os.getenv("url_video_c")
    try:
        if detect(prompt) == 'ru':
            prompt = GoogleTranslator(source='ru', target='en').translate(prompt)
            print(prompt)
    except Exception:
        # Prompt is optional here, so translation failures are ignored.
        pass
    # UI label (Russian) -> backend motion keyword; unknown labels pass through.
    motion_map = {
        "Приближение →←": 'zoom in',
        "Отдаление ←→": 'zoom out',
        "Вверх ↑": 'up',
        "Вниз ↓": 'down',
        "Влево ←": 'left',
        "Вправо →": 'right',
        "По часовой стрелке ⟳": 'rotate cw',
        "Против часовой стрелки ⟲": 'rotate ccw',
    }
    motion = motion_map.get(motion, motion)
    # BUG FIX: the original did str(b64_bytes).replace("b'", ''), which strips
    # the leading b' of the bytes repr but leaves a trailing single quote in
    # the payload. Decode the base64 bytes properly instead.
    with open(encoded_string, "rb") as image_file:
        image_b64 = base64.b64encode(image_file.read()).decode('ascii')
    data = {"prompt": f"{prompt}", "image": image_b64, "denoise": 0.75, "motion": motion}
    r = requests.post(f"{url_video_g}", json=data)
    while True:
        data2 = {"task_id": f"{r.json()['task_id']}"}
        r2 = requests.post(f"{url_video_c}", json=data2)
        time.sleep(3)
        try:
            # While the task is pending, the check endpoint answers with JSON.
            if r2.json()['status'] in ("QUEUED", "PROCESSING"):
                continue
        except (ValueError, KeyError):
            # json() raises ValueError (JSONDecodeError) once the response is
            # the raw video; KeyError if 'status' is absent. Narrowed from the
            # original bare `except:`.
            try:
                n_im2 = f"{time.time()}"
                with tempfile.NamedTemporaryFile(prefix=f'aaafff{n_im2}',
                                                 suffix='.mp4',
                                                 delete=False) as file:
                    for chunk in r2.iter_content(chunk_size=1024):
                        if chunk:
                            file.write(chunk)
                return file.name
            except Exception as e:
                print(e)
                break
# Custom CSS injected into the Gradio Blocks UI: styles the #generate
# buttons (pink, pill-shaped, full-width) and hides the Gradio footer.
css = """
#generate {
width: 100%;
background: #e253dd !important;
border: none;
border-radius: 50px;
outline: none !important;
color: white;
}
#generate:hover {
background: #de6bda !important;
outline: none !important;
color: #fff;
}
footer {visibility: hidden !important;}
"""
# Two-tab UI: text-to-video (flip_text) and image animation (animate_img).
# Labels are in Russian; motion choices map to backend keywords inside the
# handler functions. (Indentation restored; tokens unchanged.)
with gr.Blocks(css=css) as demo:
    with gr.Tab("Сгенерировать видео"):  # "Generate video" tab
        with gr.Column():
            prompt = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание:', lines=3)
            motion1 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
                "Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
        with gr.Column():
            text_button = gr.Button("Сгенерировать видео", variant='primary', elem_id="generate")
        with gr.Column():
            video_output = gr.Video(show_label=True, label='Результат:', type="file")
        text_button.click(flip_text, inputs=[prompt, motion1], outputs=video_output)
    with gr.Tab("Анимировать изображение"):  # "Animate image" tab
        with gr.Column():
            prompt2 = gr.Image(show_label=True, interactive=True, type='filepath', label='Исходное изображение:')
            # NOTE(review): prompt12/motion2 are collected but the click handler
            # below only passes prompt2 to animate_img — flip_text2 (which would
            # use them) is never wired up; confirm whether that is intended.
            prompt12 = gr.Textbox(placeholder="Введите описание видео...", show_label=True, label='Описание видео (опционально):', lines=3)
            motion2 = gr.Dropdown(value="Приближение →←", interactive=True, show_label=True, label="Движение камеры:", choices=[
                "Приближение →←", "Отдаление ←→", "Вверх ↑", "Вниз ↓", "Влево ←", "Вправо →", "По часовой стрелке ⟳", "Против часовой стрелки ⟲"])
        with gr.Column():
            text_button2 = gr.Button("Анимировать изображение", variant='primary', elem_id="generate")
        with gr.Column():
            video_output2 = gr.Video(show_label=True, label='Результат:', type="file")
        text_button2.click(animate_img, inputs=[prompt2], outputs=video_output2)
# queue() with concurrency_count requires Gradio 3.x (argument removed in 4.x).
demo.queue(concurrency_count=12)
demo.launch()