'''import sys
import asyncio
from aiohttp import web, WSMsgType
import json
from json import JSONEncoder
import numpy as np
import uuid
import logging
import os
import signal
from typing import Dict, Any, List, Optional
import base64
import io
from PIL import Image
import gradio as gr
import cv2

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

def SIGSEGV_signal_arises(signalNum, stack):
    logger.critical(f"{signalNum} : SIGSEGV arises")
    logger.critical(f"Stack trace: {stack}")

signal.signal(signal.SIGSEGV, SIGSEGV_signal_arises)

from loader import initialize_models
from engine import Engine, base64_data_uri_to_PIL_Image
from pathlib import Path
import cv2

# Global constants
DATA_ROOT = os.environ.get('DATA_ROOT', '/tmp/data')
MODELS_DIR = os.path.join(DATA_ROOT, "models")

async def setup():
    live_portrait = await initialize_models()
    engine = Engine(live_portrait=live_portrait)

    def get_all_frames(video_path):
        cap = cv2.VideoCapture(video_path)
        frames = []
        while True:
            ret, frame = cap.read()
            if not ret:
                break
            frames.append(frame)
        cap.release()
        return frames

    async def return_image(image):
        binary_data = Path(image).read_bytes()
        res = await engine.load_image(binary_data)
        id = res['u']
        _, image = await engine.transform_image(id, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        })
        return image

    async def return_video(video):
        print(video)
        gr.Info("Extracting frames..")
        frames = get_all_frames(video)
        gr.Info("Loading frames..")
        res = await engine.load_frames(frames)
        id = res['u']
        height, width, _ = frames[0].shape
        output_file = "output_video.mp4"
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video_writer = cv2.VideoWriter(output_file, fourcc, 24.0, (width, height))
        gr.Info("Processing..")
        async for image in engine.transform_video(id, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        }):
            # Convert once from RGB (assuming the engine yields RGB PIL frames) to the
            # BGR channel order that cv2.VideoWriter expects.
            video_writer.write(cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR))
        video_writer.release()
        return output_file

    with gr.Blocks(title="Retorno de Imagem") as interface:
        gr.Markdown("## 📼 Conversor de Vídeo para Imagem")
        with gr.Row():  # Arrange the components in a single row
            video_input = gr.Video(label="Carregue seu vídeo")
            image_output = gr.Video(label="Imagem Processada")
        submit_btn = gr.Button("🔁 Processar", variant="primary")  # Primary button style
        submit_btn.click(
            fn=return_video,  # Your processing function
            inputs=video_input,
            outputs=image_output,
        )

    interface.launch(share=True)

if __name__ == "__main__":
    asyncio.run(setup())'''
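
# The disabled block above is the earlier frame-by-frame implementation (OpenCV frame
# extraction, engine.transform_video streaming, and VideoWriter re-encoding); the
# active code below delegates the whole pipeline to engine.process_video instead.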

import sys
import asyncio
from aiohttp import web, WSMsgType
import json
from json import JSONEncoder
import numpy as np
import uuid
import logging
import os
import signal
from typing import Dict, Any, List, Optional
import base64
import io
from PIL import Image
import gradio as gr
import cv2

logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)

# Log any SIGSEGV delivered to the process (e.g. from native model code) at CRITICAL level.
def SIGSEGV_signal_arises(signalNum, stack):
    logger.critical(f"{signalNum} : SIGSEGV arises")
    logger.critical(f"Stack trace: {stack}")

signal.signal(signal.SIGSEGV, SIGSEGV_signal_arises)

from loader import initialize_models
from engine import Engine, base64_data_uri_to_PIL_Image
from pathlib import Path
import cv2

# Global constants
DATA_ROOT = os.environ.get('DATA_ROOT', '/tmp/data')
MODELS_DIR = os.path.join(DATA_ROOT, "models")
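# DATA_ROOT can be overridden via the environment; MODELS_DIR is derived from it and
# is presumably consumed by loader.initialize_models(), since it is not referenced
# again in this file.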

async def setup():
    live_portrait = await initialize_models()
    engine = Engine(live_portrait=live_portrait)
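
    # `return_video` and the Interface below are defined inside setup() so they can
    # close over this `engine` instance.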
    async def return_video(video):
        gr.Info("Processing video..")
        output = await engine.process_video(video, {
            "aaa": -10,
            "eee": -10,
            "woo": -12
        })
        return output
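
    # The expression dict above is passed straight to the engine; the parameter names
    # ("aaa", "eee", "woo") and their valid ranges are defined by engine.py and the
    # LivePortrait models, not by this file. A minimal sketch of a shared default,
    # with values copied verbatim from the calls in this file:
    def default_expression_params() -> Dict[str, int]:
        return {"aaa": -10, "eee": -10, "woo": -12}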

    interface = gr.Interface(
        fn=return_video,  # Your function to process video
        inputs=gr.Video(label="Carregue seu vídeo"),
        outputs=gr.Video(label="Imagem Processada"),
        title="Retorno de Imagem",
        description="📼 Conversor de Vídeo para Imagem"
    )

    interface.launch(share=True)

if __name__ == "__main__":
    asyncio.run(setup())
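
# Running this file directly starts the Gradio UI via asyncio.run(setup());
# share=True requests a temporary public share link (*.gradio.live) in addition
# to the local server.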