Carlexx committed
Commit b68c001 · verified · 1 Parent(s): db4c2fa

Delete app.py

Files changed (1)
  1. app.py +0 -422
app.py DELETED
@@ -1,422 +0,0 @@
- # Euia-AducSdr: An open, working implementation of the ADUC-SDR architecture for coherent video generation.
- # Copyright (C) August 4, 2025  Carlos Rodrigues dos Santos
- #
- # Contact:
- # Carlos Rodrigues dos Santos
- # carlex22@gmail.com
- # Rua Eduardo Carlos Pereira, 4125, B1 Ap32, Curitiba, PR, Brazil, CEP 8102025
- #
- # Related repositories and projects:
- # GitHub: https://github.com/carlex22/Aduc-sdr
- # Hugging Face: https://huggingface.co/spaces/Carlexx/Ltx-SuperTime-60Secondos/
- # Hugging Face: https://huggingface.co/spaces/Carlexxx/Novinho/
- #
- # This program is free software: you can redistribute it and/or modify
- # it under the terms of the GNU Affero General Public License as published by
- # the Free Software Foundation, either version 3 of the License, or
- # (at your option) any later version.
- #
- # This program is distributed in the hope that it will be useful,
- # but WITHOUT ANY WARRANTY; without even the implied warranty of
- # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- # GNU Affero General Public License for more details.
- #
- # You should have received a copy of the GNU Affero General Public License
- # along with this program. If not, see <https://www.gnu.org/licenses/>.
-
- # --- app.py (NOVIM-5.4: The Director's Control Panel) ---
-
- # --- Act 1: Assembling the Orchestra (Imports) ---
- import gradio as gr
- import torch
- import os
- import yaml
- from PIL import Image, ImageOps, ExifTags
- import shutil
- import gc
- import subprocess
- import google.generativeai as genai
- import numpy as np
- import imageio
- from pathlib import Path
- import huggingface_hub
- import json
- import time
-
- from inference import create_ltx_video_pipeline, load_image_to_tensor_with_resize_and_crop, ConditioningItem, calculate_padding
- from dreamo_helpers import dreamo_generator_singleton
-
- # --- Act 2: Setting the Stage (Configuration) ---
- config_file_path = "configs/ltxv-13b-0.9.8-distilled.yaml"
- with open(config_file_path, "r") as file:
-     PIPELINE_CONFIG_YAML = yaml.safe_load(file)
-
- LTX_REPO = "Lightricks/LTX-Video"
- models_dir = "downloaded_models_gradio"
- Path(models_dir).mkdir(parents=True, exist_ok=True)
- WORKSPACE_DIR = "aduc_workspace"
- GEMINI_API_KEY = os.environ.get("GEMINI_API_KEY")
-
- VIDEO_FPS = 24
- TARGET_RESOLUTION = 420
-
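- # The LTX pipeline is created once on the CPU and kept there as an idle state;
- # each stage moves it (or the DreamO generator) onto the GPU only while that
- # stage runs, then returns it to the CPU to free VRAM for the next model.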
- print("Creating LTX pipelines on the CPU (idle state)...")
- distilled_model_actual_path = huggingface_hub.hf_hub_download(repo_id=LTX_REPO, filename=PIPELINE_CONFIG_YAML["checkpoint_path"], local_dir=models_dir, local_dir_use_symlinks=False)
- pipeline_instance = create_ltx_video_pipeline(
-     ckpt_path=distilled_model_actual_path,
-     precision=PIPELINE_CONFIG_YAML["precision"],
-     text_encoder_model_name_or_path=PIPELINE_CONFIG_YAML["text_encoder_model_name_or_path"],
-     sampler=PIPELINE_CONFIG_YAML["sampler"],
-     device='cpu'
- )
- print("LTX models ready (on CPU).")
-
-
- # --- Act 3: The Musicians' Scores (Generation and Analysis Functions) ---
-
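- # Gemini often wraps its JSON answer in Markdown fences or surrounding prose,
- # so we parse only the slice between the outermost braces.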
- def robust_json_parser(raw_text: str) -> dict:
-     try:
-         start_index = raw_text.find('{')
-         end_index = raw_text.rfind('}')
-         if start_index != -1 and end_index != -1 and end_index > start_index:
-             json_str = raw_text[start_index : end_index + 1]
-             return json.loads(json_str)
-         else:
-             raise ValueError("No valid JSON object found in the AI response.")
-     except json.JSONDecodeError as e:
-         raise ValueError(f"Failed to decode JSON: {e}")
-
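- # EXIF metadata from the reference image (camera, lens, exposure) is forwarded
- # to the screenwriter prompt so Gemini can reason about the photographic context.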
- def extract_image_exif(image_path: str) -> str:
-     try:
-         img = Image.open(image_path)
-         exif_data = img._getexif()
-         if not exif_data: return "No EXIF metadata found."
-         exif = {ExifTags.TAGS[k]: v for k, v in exif_data.items() if k in ExifTags.TAGS}
-         relevant_tags = ['DateTimeOriginal', 'Model', 'LensModel', 'FNumber', 'ExposureTime', 'ISOSpeedRatings', 'FocalLength']
-         metadata_str = ", ".join(f"{key}: {exif[key]}" for key in relevant_tags if key in exif)
-         return metadata_str if metadata_str else "No relevant EXIF metadata found."
-     except Exception:
-         return "Could not read EXIF data."
-
- def run_storyboard_generation(num_fragments: int, prompt: str, initial_image_path: str):
-     if not initial_image_path: raise gr.Error("Please provide an initial reference image.")
-     if not GEMINI_API_KEY: raise gr.Error("Gemini API key not configured!")
-     exif_metadata = extract_image_exif(initial_image_path)
-     prompt_file = "prompts/unified_storyboard_prompt.txt"
-     with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f:
-         template = f.read()
-     director_prompt = template.format(user_prompt=prompt, num_fragments=int(num_fragments), image_metadata=exif_metadata)
-     genai.configure(api_key=GEMINI_API_KEY)
-     model = genai.GenerativeModel('gemini-1.5-flash')
-     img = Image.open(initial_image_path)
-     print("Generating storyboard with integrated vision analysis...")
-     response = model.generate_content([director_prompt, img])
-     try:
-         storyboard_data = robust_json_parser(response.text)
-         storyboard = storyboard_data.get("scene_storyboard", [])
-         if not storyboard or len(storyboard) != int(num_fragments):
-             raise ValueError(f"The AI did not generate the correct number of scenes. Expected: {num_fragments}, received: {len(storyboard)}")
-         return storyboard
-     except Exception as e:
-         raise gr.Error(f"The Screenwriter (Gemini) failed to create the storyboard: {e}. Response received: {response.text}")
-
- def get_dreamo_prompt_for_transition(previous_image_path: str, target_scene_description: str) -> str:
-     genai.configure(api_key=GEMINI_API_KEY)
-     prompt_file = "prompts/img2img_evolution_prompt.txt"
-     with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f:
-         template = f.read()
-     director_prompt = template.format(target_scene_description=target_scene_description)
-     model = genai.GenerativeModel('gemini-1.5-flash')
-     img = Image.open(previous_image_path)
-     response = model.generate_content([director_prompt, "Previous Image:", img])
-     return response.text.strip().replace("\"", "")
-
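- # Keyframes are painted as a chain: every new keyframe is conditioned on the
- # fixed user references plus up to three of the most recent keyframes (task
- # 'ip'), and the Art Director (Gemini) writes a fresh DreamO prompt for each
- # transition from the previous image to the next scene description.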
- def run_keyframe_generation(storyboard, ref_images_tasks, progress=gr.Progress()):
-     if not storyboard: raise gr.Error("No storyboard to generate keyframes from.")
-     initial_ref_image_path = ref_images_tasks[0]['image']
-     if not initial_ref_image_path or not os.path.exists(initial_ref_image_path): raise gr.Error("The main reference image (on the left) is required.")
-
-     log_history = ""
-     generated_images_for_gallery = []
-     try:
-         pipeline_instance.to('cpu'); gc.collect(); torch.cuda.empty_cache()
-         dreamo_generator_singleton.to_gpu()
-         with Image.open(initial_ref_image_path) as img:
-             width, height = (img.width // 32) * 32, (img.height // 32) * 32
-
-         keyframe_paths, current_ref_image_path = [initial_ref_image_path], initial_ref_image_path
-
-         for i, scene_description in enumerate(storyboard):
-             progress(i / len(storyboard), desc=f"Painting keyframe {i+1}/{len(storyboard)}")
-             log_history += f"\n--- PAINTING KEYFRAME {i+1}/{len(storyboard)} ---\n"
-             dreamo_prompt = get_dreamo_prompt_for_transition(current_ref_image_path, scene_description)
-
-             reference_items = []
-             fixed_references_basenames = [os.path.basename(item['image']) for item in ref_images_tasks if item['image']]
-
-             for item in ref_images_tasks:
-                 if item['image']:
-                     reference_items.append({'image_np': np.array(Image.open(item['image']).convert("RGB")), 'task': item['task']})
-
-             dynamic_references_paths = keyframe_paths[-3:]
-             for ref_path in dynamic_references_paths:
-                 if os.path.basename(ref_path) not in fixed_references_basenames:
-                     reference_items.append({'image_np': np.array(Image.open(ref_path).convert("RGB")), 'task': 'ip'})
-
-             log_history += f" - Scene: '{scene_description}'\n - Using {len(reference_items)} visual references.\n - Art Director's prompt: \"{dreamo_prompt}\"\n"
-             yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=generated_images_for_gallery)}
-
-             output_path = os.path.join(WORKSPACE_DIR, f"keyframe_{i+1}.png")
-             image = dreamo_generator_singleton.generate_image_with_gpu_management(reference_items=reference_items, prompt=dreamo_prompt, width=width, height=height)
-             image.save(output_path)
-
-             keyframe_paths.append(output_path)
-             generated_images_for_gallery.append(output_path)
-             current_ref_image_path = output_path
-             yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=generated_images_for_gallery)}
-
-     except Exception as e:
-         raise gr.Error(f"The Painter (DreamO) or Art Director (Gemini) failed: {e}")
-     finally:
-         dreamo_generator_singleton.to_cpu(); gc.collect(); torch.cuda.empty_cache()
-     log_history += "\nAll keyframes painted.\n"
-     yield {keyframe_log_output: gr.update(value=log_history), keyframe_gallery_output: gr.update(value=generated_images_for_gallery), keyframe_images_state: keyframe_paths}
-
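- # The two motion-prompt helpers below play the AI Cinematographer: the first
- # transition sees only a START and a DESTINATION image; every later transition
- # also receives the "kinetic memory" frame (the last frame of the previous
- # fragment) so camera motion carries over between cuts.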
- def get_initial_motion_prompt(user_prompt: str, start_image_path: str, destination_image_path: str, dest_scene_desc: str):
-     if not GEMINI_API_KEY: raise gr.Error("Gemini API key not configured!")
-     try:
-         genai.configure(api_key=GEMINI_API_KEY)
-         model = genai.GenerativeModel('gemini-1.5-flash')
-         prompt_file = "prompts/initial_motion_prompt.txt"
-         with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f:
-             template = f.read()
-         cinematographer_prompt = template.format(user_prompt=user_prompt, destination_scene_description=dest_scene_desc)
-         start_img, dest_img = Image.open(start_image_path), Image.open(destination_image_path)
-         model_contents = ["START Image:", start_img, "DESTINATION Image:", dest_img, cinematographer_prompt]
-         response = model.generate_content(model_contents)
-         return response.text.strip()
-     except Exception as e:
-         raise gr.Error(f"The AI Cinematographer (initial) failed: {e}. Response: {getattr(e, 'text', 'No text available.')}")
-
- def get_dynamic_motion_prompt(user_prompt, story_history, memory_image_path, path_image_path, destination_image_path, path_scene_desc, dest_scene_desc):
-     if not GEMINI_API_KEY: raise gr.Error("Gemini API key not configured!")
-     try:
-         genai.configure(api_key=GEMINI_API_KEY)
-         model = genai.GenerativeModel('gemini-1.5-flash')
-         prompt_file = "prompts/dynamic_motion_prompt.txt"
-         with open(os.path.join(os.path.dirname(__file__), prompt_file), "r", encoding="utf-8") as f:
-             template = f.read()
-         cinematographer_prompt = template.format(user_prompt=user_prompt, story_history=story_history, midpoint_scene_description=path_scene_desc, destination_scene_description=dest_scene_desc)
-         mem_img, path_img, dest_img = Image.open(memory_image_path), Image.open(path_image_path), Image.open(destination_image_path)
-         model_contents = ["START Image (Memory):", mem_img, "MIDPOINT Image (Path):", path_img, "DESTINATION Image (Destination):", dest_img, cinematographer_prompt]
-         response = model.generate_content(model_contents)
-         return response.text.strip()
-     except Exception as e:
-         raise gr.Error(f"The AI Cinematographer (dynamic) failed: {e}. Response: {getattr(e, 'text', 'No text available.')}")
-
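- # Production walks the keyframe list pairwise. Each fragment is conditioned on
- # up to three anchor frames (start / midpoint "path" / end "destination"); all
- # but the last fragment are trimmed at the echo cut point, and the trimmed
- # fragment's final frame becomes the kinetic memory that starts the next shot.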
- def run_video_production(
-     video_duration_seconds, video_fps, cut_frames_value, use_attention_slicing,
-     mid_cond_frame, mid_cond_strength, end_cond_frame_offset, num_inference_steps,
-     prompt_geral, keyframe_images_state, scene_storyboard, cfg,
-     progress=gr.Progress()
- ):
-     video_total_frames = int(video_duration_seconds * video_fps)
-     if not keyframe_images_state or len(keyframe_images_state) < 3: raise gr.Error("Paint at least 2 keyframes to produce a transition.")
-     log_history = "\n--- PHASE 3/4: Starting production with manual controls...\n"
-     yield {production_log_output: log_history, video_gallery_glitch: []}
-
-     end_cond_frame = video_total_frames - end_cond_frame_offset
-     seed = int(time.time())
-     target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
-     try:
-         pipeline_instance.to(target_device)
-         video_fragments, story_history = [], ""
-         kinetic_memory_path = None
-         with Image.open(keyframe_images_state[1]) as img:
-             width, height = img.size
-
-         num_transitions = len(keyframe_images_state) - 2
-         for i in range(num_transitions):
-             fragment_num = i + 1
-             progress(i / num_transitions, desc=f"Shooting fragment {fragment_num}/{num_transitions}")
-             log_history += f"\n--- FRAGMENT {fragment_num} ---\n"
-
-             if i == 0:
-                 start_path, destination_path = keyframe_images_state[1], keyframe_images_state[2]
-                 dest_scene_desc = scene_storyboard[1]
-                 log_history += f" - Start (Big Bang): {os.path.basename(start_path)}\n - Destination: {os.path.basename(destination_path)}\n"
-                 current_motion_prompt = get_initial_motion_prompt(prompt_geral, start_path, destination_path, dest_scene_desc)
-                 conditioning_items_data = [(start_path, 0, 1.0), (destination_path, int(end_cond_frame), 1.0)]
-             else:
-                 memory_path, path_path, destination_path = kinetic_memory_path, keyframe_images_state[i+1], keyframe_images_state[i+2]
-                 path_scene_desc, dest_scene_desc = scene_storyboard[i], scene_storyboard[i+1]
-                 log_history += f" - Kinetic memory: {os.path.basename(memory_path)}\n - Path: {os.path.basename(path_path)}\n - Destination: {os.path.basename(destination_path)}\n"
-                 current_motion_prompt = get_dynamic_motion_prompt(prompt_geral, story_history, memory_path, path_path, destination_path, path_scene_desc, dest_scene_desc)
-                 conditioning_items_data = [(memory_path, 0, 1.0), (path_path, int(mid_cond_frame), mid_cond_strength), (destination_path, int(end_cond_frame), 1.0)]
-
-             story_history += f"\n- Act {fragment_num + 1}: {current_motion_prompt}"
-             log_history += f" - Cinematographer's instruction: '{current_motion_prompt}'\n"
-             yield {production_log_output: log_history}
-             full_fragment_path, _ = run_ltx_animation(
-                 current_fragment_index=fragment_num, motion_prompt=current_motion_prompt,
-                 conditioning_items_data=conditioning_items_data, width=width, height=height,
-                 seed=seed, cfg=cfg, progress=progress,
-                 video_total_frames=video_total_frames, video_fps=video_fps,
-                 use_attention_slicing=use_attention_slicing, num_inference_steps=num_inference_steps
-             )
-
-             is_last_fragment = (i == num_transitions - 1)
-             if is_last_fragment:
-                 final_fragment_path = full_fragment_path
-                 log_history += " - Last fragment generated; keeping the full duration for a clean ending.\n"
-             else:
-                 final_fragment_path = os.path.join(WORKSPACE_DIR, f"fragment_{fragment_num}_trimmed.mp4")
-                 trim_video_to_frames(full_fragment_path, final_fragment_path, int(cut_frames_value))
-                 eco_output_path = os.path.join(WORKSPACE_DIR, f"eco_from_frag_{fragment_num}.png")
-                 kinetic_memory_path = extract_last_frame_as_image(final_fragment_path, eco_output_path)
-                 log_history += f" - Generated and trimmed. New dynamic echo created: {os.path.basename(kinetic_memory_path)}\n"
-
-             video_fragments.append(final_fragment_path)
-             yield {production_log_output: log_history, video_gallery_glitch: video_fragments}
-
-         progress(1.0, desc="Production complete.")
-         yield {production_log_output: log_history, video_gallery_glitch: video_fragments, fragment_list_state: video_fragments}
-     finally:
-         pipeline_instance.to('cpu'); gc.collect(); torch.cuda.empty_cache()
-
- def process_image_to_square(image_path: str, size: int = TARGET_RESOLUTION) -> str:
-     if not image_path: return None
-     try:
-         img = Image.open(image_path).convert("RGB")
-         img_square = ImageOps.fit(img, (size, size), Image.Resampling.LANCZOS)
-         output_path = os.path.join(WORKSPACE_DIR, f"initial_ref_{size}x{size}.png")
-         img_square.save(output_path)
-         return output_path
-     except Exception as e:
-         raise gr.Error(f"Failed to process the reference image: {e}")
-
- def load_conditioning_tensor(media_path: str, height: int, width: int) -> torch.Tensor:
-     return load_image_to_tensor_with_resize_and_crop(media_path, height, width)
-
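- # The LTX pipeline only accepts frame counts of the form 8k+1 and spatial
- # dimensions that are multiples of 32, so the animation helper snaps the
- # requested frame count, pads width/height, and pads each conditioning tensor
- # to match before sampling, then crops the padding off the decoded video.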
- def run_ltx_animation(
-     current_fragment_index, motion_prompt, conditioning_items_data,
-     width, height, seed, cfg, progress,
-     video_total_frames, video_fps, use_attention_slicing, num_inference_steps
- ):
-     progress(0, desc=f"[LTX Camera] Shooting scene {current_fragment_index}...")
-     output_path = os.path.join(WORKSPACE_DIR, f"fragment_{current_fragment_index}_full.mp4")
-     target_device = 'cuda' if torch.cuda.is_available() else 'cpu'
-     try:
-         if use_attention_slicing: pipeline_instance.enable_attention_slicing()
-         conditioning_items = [ConditioningItem(load_conditioning_tensor(p, height, width).to(target_device), s, t) for p, s, t in conditioning_items_data]
-         actual_num_frames = int(round((float(video_total_frames) - 1.0) / 8.0) * 8 + 1)
-         padded_h, padded_w = ((height - 1) // 32 + 1) * 32, ((width - 1) // 32 + 1) * 32
-         padding_vals = calculate_padding(height, width, padded_h, padded_w)
-         for item in conditioning_items: item.media_item = torch.nn.functional.pad(item.media_item, padding_vals)
-
-         first_pass_config = PIPELINE_CONFIG_YAML.get("first_pass", {}).copy()
-         first_pass_config['num_inference_steps'] = int(num_inference_steps)
-
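-         # All sampling options are collected in a single call: the conditioning
-         # anchors, the first-pass timesteps from the YAML config, and a
-         # per-fragment generator seed (base seed + fragment index) so each
-         # fragment is reproducible without being identical to the others.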
-         kwargs = {
-             "prompt": motion_prompt,
-             "negative_prompt": "blurry, distorted, bad quality, artifacts",
-             "height": padded_h, "width": padded_w,
-             "num_frames": actual_num_frames, "frame_rate": video_fps,
-             "generator": torch.Generator(device=target_device).manual_seed(int(seed) + current_fragment_index),
-             "output_type": "pt", "guidance_scale": float(cfg),
-             "timesteps": first_pass_config.get("timesteps"),
-             "conditioning_items": conditioning_items,
-             "decode_timestep": PIPELINE_CONFIG_YAML.get("decode_timestep"),
-             "decode_noise_scale": PIPELINE_CONFIG_YAML.get("decode_noise_scale"),
-             "stochastic_sampling": PIPELINE_CONFIG_YAML.get("stochastic_sampling"),
-             "image_cond_noise_scale": 0.15, "is_video": True,
-             "vae_per_channel_normalize": True,
-             "mixed_precision": (PIPELINE_CONFIG_YAML.get("precision") == "mixed_precision"),
-             "enhance_prompt": False, "decode_every": 4,
-             "num_inference_steps": int(num_inference_steps),
-         }
-
-         result_tensor = pipeline_instance(**kwargs).images
-
-         pad_l, pad_r, pad_t, pad_b = map(int, padding_vals)
-         slice_h = -pad_b if pad_b > 0 else None
-         slice_w = -pad_r if pad_r > 0 else None
-         cropped_tensor = result_tensor[:, :, :video_total_frames, pad_t:slice_h, pad_l:slice_w]
-         video_np = (cropped_tensor[0].permute(1, 2, 3, 0).cpu().float().numpy() * 255).astype(np.uint8)
-         with imageio.get_writer(output_path, fps=video_fps, codec='libx264', quality=8) as writer:
-             for frame in video_np: writer.append_data(frame)
-         return output_path, actual_num_frames
-     finally:
-         if use_attention_slicing: pipeline_instance.disable_attention_slicing()
-
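- # FFmpeg helpers. Trimming uses the select filter to keep only the first N
- # frames (dropping audio with -an); the echo frame is grabbed by seeking one
- # second before the end of the fragment and writing the final frame as PNG.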
- def trim_video_to_frames(input_path: str, output_path: str, frames_to_keep: int) -> str:
-     try:
-         subprocess.run(f"ffmpeg -y -v error -i \"{input_path}\" -vf \"select='lt(n,{frames_to_keep})'\" -an \"{output_path}\"", shell=True, check=True, text=True)
-         return output_path
-     except subprocess.CalledProcessError as e:
-         raise gr.Error(f"FFmpeg failed to trim the video: {e.stderr}")
-
- def extract_last_frame_as_image(video_path: str, output_image_path: str) -> str:
-     try:
-         subprocess.run(f"ffmpeg -y -v error -sseof -1 -i \"{video_path}\" -update 1 -q:v 1 \"{output_image_path}\"", shell=True, check=True, text=True)
-         return output_image_path
-     except subprocess.CalledProcessError as e:
-         raise gr.Error(f"FFmpeg failed to extract the last frame: {e.stderr}")
-
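- # Final assembly uses ffmpeg's concat demuxer with stream copy (-c copy), so
- # the fragments are joined without re-encoding.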
- def concatenate_and_trim_masterpiece(fragment_paths: list, progress=gr.Progress()):
-     if not fragment_paths: raise gr.Error("No video fragments to concatenate.")
-     progress(0.5, desc="Assembling the final masterpiece...")
-     try:
-         list_file_path = os.path.join(WORKSPACE_DIR, "concat_list.txt")
-         final_output_path = os.path.join(WORKSPACE_DIR, "masterpiece_final.mp4")
-         with open(list_file_path, "w") as f:
-             for p in fragment_paths: f.write(f"file '{os.path.abspath(p)}'\n")
-         subprocess.run(f"ffmpeg -y -v error -f concat -safe 0 -i \"{list_file_path}\" -c copy \"{final_output_path}\"", shell=True, check=True, text=True)
-         progress(1.0, desc="Assembly complete!")
-         return final_output_path
-     except subprocess.CalledProcessError as e:
-         raise gr.Error(f"FFmpeg failed during final concatenation: {e.stderr}")
-
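- # The Stage (Gradio UI): the four-step director's panel. The workspace is
- # wiped and recreated on every launch, so each session starts clean.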
- with gr.Blocks(theme=gr.themes.Soft()) as demo:
-     gr.Markdown("# NOVIM-5.4 (The Director's Control Panel)\n*By Carlex & Gemini & DreamO*")
-     if os.path.exists(WORKSPACE_DIR): shutil.rmtree(WORKSPACE_DIR)
-     os.makedirs(WORKSPACE_DIR)
-     Path("prompts").mkdir(exist_ok=True)
-
-     scene_storyboard_state, keyframe_images_state, fragment_list_state = gr.State([]), gr.State([]), gr.State([])
-     prompt_geral_state, processed_ref_path_state = gr.State(""), gr.State("")
-
-     gr.Markdown("--- \n ## STEP 1: THE SCRIPT (AI Screenwriter)")
-     with gr.Row():
-         with gr.Column(scale=1):
-             prompt_input = gr.Textbox(label="General Idea (Prompt)")
-             num_fragments_input = gr.Slider(2, 5, 4, step=1, label="Number of Acts (Keyframes)")
-             image_input = gr.Image(type="filepath", label=f"Main Reference Image (will be {TARGET_RESOLUTION}x{TARGET_RESOLUTION})")
-             director_button = gr.Button("▶️ 1. Generate Script", variant="primary")
-         with gr.Column(scale=2):
-             storyboard_to_show = gr.JSON(label="Generated Scene Storyboard (in English)")
-
-     gr.Markdown("--- \n ## STEP 2: THE KEYFRAMES (AI Painter & Art Director)")
-     with gr.Row():
-         with gr.Column(scale=2):
-             gr.Markdown("Provide references to guide the AI. The main one is required. The secondary one is optional (e.g., for style or a second person).")
-             with gr.Row():
-                 with gr.Column():
-                     ref1_image = gr.Image(label="Main Reference (Content/ID)", type="filepath")
-                     ref1_task = gr.Dropdown(choices=["ip", "id", "style"], value="ip", label="Main Ref. Task")
-                 with gr.Column():
-                     ref2_image = gr.Image(label="Secondary Reference (Optional)", type="filepath")
-                     ref2_task = gr.Dropdown(choices=["ip", "id", "style"], value="style", label="Secondary Ref. Task")
-             photographer_button = gr.Button("▶️ 2. Paint Keyframes in a Chain", variant="primary")
-         with gr.Column(scale=1):
-             keyframe_log_output = gr.Textbox(label="Painter's Logbook", lines=15, interactive=False)
-             keyframe_gallery_output = gr.Gallery(label="Painted Keyframes", object_fit="contain", height="auto", type="filepath", interactive=True)
-
-     gr.Markdown("--- \n ## STEP 3: THE PRODUCTION (AI Cinematographer & Camera)")
-     with gr.Row():
-         with gr.Column(scale=1):
-             cfg_slider = gr.Slider(1.0, 10.0, 7.5, step=0.1, label="CFG")
-             end_cond_strength_slider = gr.Slider(label="Destination Convergence Strength", minimum=0.1, maximum=1.0, value=1.0, step=0.05)
-             with gr.Accordion("Advanced Timing and Performance Controls", open=False):
-                 video_duration_slider = gr.Slider(label="Scene Duration (seconds)", minimum=2.0, maximum=10.0, value=4.0, step=0.5)
-                 video_fps_slider = gr.Slider(label="Video FPS", minimum=12, maximum=36, value=VIDEO_FPS, step=1)
-                 num_inference_steps_slider = gr.Slider(label="Inference Steps", minimum=10, maximum=50, value=30, step=1)
-                 cut_frames_slider = gr.Slider(label="Echo Cut Point (Frames)", minimum=30, maximum=300, value=150, step=1)
-                 slicing_checkbox = gr.Checkbox(label="Use Attention Slicing (Saves VRAM)", value=True)
-                 gr.Markdown("---")
-                 gr.Markdown("#### Conditioning Controls")
-                 mid_cond_frame_slider = gr.Slider(label="'Path' Frame", minimum=1, maximum=300, value=54, step=1)
-                 mid_cond_strength_slider = gr.Slider(label="'Path' Strength", minimum=0.1, maximum=1.0, value=0.5, step=0.05)
-                 end_cond_offset_slider = gr.Slider(label="'Destination' Convergence Offset (frames from the end)", minimum=1, maximum=48, value=8, step=1)
-             animator_button = gr.Button("▶️ 3. Produce Scenes (Kinetic Handoff)", variant="primary")
-             production_log_output = gr.Textbox(label="Production Logbook", lines=15, interactive=False)
-         with gr.Column(scale=1):
-             video_gallery_glitch = gr.Gallery(label="Generated Fragments", object_fit="contain", height="auto", type="video")
-
-     gr.Markdown("--- \n ## STEP 4: POST-PRODUCTION (Editor)")
-     editor_button = gr.Button("▶️ 4. Assemble Final Video", variant="primary")
-     final_video_output = gr.Video(label="The Final Masterpiece", width=TARGET_RESOLUTION)
-
-     # --- Act 6: Conducting the Orchestra (Button Wiring) ---
-     def process_and_update_storyboard(num_fragments, prompt, image_path):
-         processed_path = process_image_to_square(image_path)
-         if not processed_path: raise gr.Error("The reference image is invalid or was not provided.")
-         storyboard = run_storyboard_generation(num_fragments, prompt, processed_path)
-         return storyboard, prompt, processed_path, storyboard, processed_path
-
-     director_button.click(
-         fn=process_and_update_storyboard,
-         inputs=[num_fragments_input, prompt_input, image_input],
-         outputs=[scene_storyboard_state, prompt_geral_state, processed_ref_path_state, storyboard_to_show, ref1_image]
-     )
-
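-     # The wrapper drains the keyframe generator and returns only its final
-     # update. The yielded dicts are keyed by the Gradio components themselves,
-     # so lookups must use those component objects, not their variable names.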
-     def run_keyframe_generation_wrapper(storyboard, ref1_img, ref1_tsk, ref2_img, ref2_tsk, progress=gr.Progress()):
-         ref_data = [{'image': ref1_img, 'task': ref1_tsk}, {'image': ref2_img, 'task': ref2_tsk}]
-         final_update = {}
-         for update in run_keyframe_generation(storyboard, ref_data, progress):
-             final_update = update
-         return final_update.get(keyframe_log_output, ''), final_update.get(keyframe_gallery_output, []), final_update.get(keyframe_images_state, [])
-
-     photographer_button.click(
-         fn=run_keyframe_generation_wrapper,
-         inputs=[scene_storyboard_state, ref1_image, ref1_task, ref2_image, ref2_task],
-         outputs=[keyframe_log_output, keyframe_gallery_output, keyframe_images_state]
-     )
-
-     animator_button.click(
-         fn=run_video_production,
-         inputs=[
-             video_duration_slider, video_fps_slider, cut_frames_slider, slicing_checkbox,
-             mid_cond_frame_slider, mid_cond_strength_slider, end_cond_offset_slider,
-             num_inference_steps_slider,
-             prompt_geral_state, keyframe_images_state, scene_storyboard_state, cfg_slider
-         ],
-         outputs=[production_log_output, video_gallery_glitch, fragment_list_state]
-     )
-
-     editor_button.click(
-         fn=concatenate_and_trim_masterpiece,
-         inputs=[fragment_list_state],
-         outputs=[final_video_output]
-     )
-
421
- if __name__ == "__main__":
422
- demo.queue().launch(server_name="0.0.0.0", share=True)