|
|
import os |
|
|
import sys |
|
|
from datetime import datetime |
|
|
|
|
|
sys.path.append( |
|
|
os.path.abspath( |
|
|
os.path.realpath( |
|
|
os.path.join(os.path.dirname(__file__), "./submodules/FramePack") |
|
|
) |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
import asyncio |
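# Windows' default Proactor event loop is incompatible with some libraries
# that expect selector-style sockets, so a Selector-based loop is forced below.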
|
|
|
|
|
if sys.platform in ("win32", "cygwin"): |
|
|
asyncio.set_event_loop_policy(asyncio.WindowsSelectorEventLoopPolicy()) |
|
|
|
|
|
sys.path.append( |
|
|
os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), "../"))) |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.vae_cache import vae_decode_cache |
|
|
|
|
|
|
|
|
vae_cache_enabled = False |
|
|
current_prompt = None |
|
|
|
|
|
import random |
|
|
import time |
|
|
import traceback |
|
|
|
|
|
import argparse |
|
|
|
|
|
|
|
|
|
|
sys.path.append(os.path.abspath(os.path.dirname(__file__))) |
|
|
from eichi_utils.combine_mode import ( |
|
|
COMBINE_MODE, |
|
|
COMBINE_MODE_OPTIONS, |
|
|
COMBINE_MODE_OPTIONS_KEYS, |
|
|
COMBINE_MODE_DEFAULT, |
|
|
get_combine_mode, |
|
|
) |
|
|
|
|
|
from eichi_utils.png_metadata import ( |
|
|
embed_metadata_to_png, |
|
|
extract_metadata_from_png, |
|
|
extract_metadata_from_numpy_array, |
|
|
PROMPT_KEY, |
|
|
SEED_KEY, |
|
|
SECTION_PROMPT_KEY, |
|
|
SECTION_NUMBER_KEY, |
|
|
) |
|
|
|
|
|
parser = argparse.ArgumentParser() |
|
|
parser.add_argument("--share", action="store_true") |
|
|
parser.add_argument("--server", type=str, default="127.0.0.1") |
|
|
parser.add_argument("--port", type=int, default=8001) |
|
|
parser.add_argument("--inbrowser", action="store_true") |
|
|
parser.add_argument("--lang", type=str, default="ja", help="Language: ja, zh-tw, en") |
|
|
args = parser.parse_args() |
|
|
|
|
|
|
|
|
from locales.i18n_extended import set_lang, translate |
|
|
|
|
|
set_lang(args.lang) |
|
|
|
|
|
try: |
|
|
import winsound |
|
|
|
|
|
HAS_WINSOUND = True |
|
|
except ImportError: |
|
|
HAS_WINSOUND = False |
|
|
|
|
|
|
|
if "HF_HOME" not in os.environ: |
|
|
os.environ["HF_HOME"] = os.path.abspath( |
|
|
os.path.realpath(os.path.join(os.path.dirname(__file__), "../hf_download")) |
|
|
) |
|
|
print(translate("HF_HOMEを設定: {0}").format(os.environ["HF_HOME"])) |
|
|
else: |
|
|
print(translate("既存のHF_HOMEを使用: {0}").format(os.environ["HF_HOME"])) |
|
|
|
|
|
temp_dir = "./temp_for_zip_section_info" |
|
|
|
|
|
|
|
|
has_lora_support = False |
|
|
try: |
|
|
import lora_utils |
|
|
|
|
|
has_lora_support = True |
|
|
print(translate("LoRAサポートが有効です")) |
|
|
except ImportError: |
|
|
print( |
|
|
translate( |
|
|
"LoRAサポートが無効です(lora_utilsモジュールがインストールされていません)" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
from eichi_utils.video_mode_settings import ( |
|
|
VIDEO_MODE_SETTINGS, |
|
|
get_video_modes, |
|
|
get_video_seconds, |
|
|
get_important_keyframes, |
|
|
get_copy_targets, |
|
|
get_max_keyframes_count, |
|
|
get_total_sections, |
|
|
generate_keyframe_guide_html, |
|
|
handle_mode_length_change, |
|
|
process_keyframe_change, |
|
|
MODE_TYPE_NORMAL, |
|
|
MODE_TYPE_LOOP, |
|
|
) |
|
|
|
|
|
|
|
|
import cv2 |
|
|
from eichi_utils.settings_manager import ( |
|
|
get_settings_file_path, |
|
|
get_output_folder_path, |
|
|
initialize_settings, |
|
|
load_settings, |
|
|
save_settings, |
|
|
open_output_folder, |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.preset_manager import ( |
|
|
initialize_presets, |
|
|
load_presets, |
|
|
get_default_startup_prompt, |
|
|
save_preset, |
|
|
delete_preset, |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils.keyframe_handler_extended import extended_mode_length_change_handler |
|
|
import gradio as gr |
|
|
|
|
|
|
|
|
from eichi_utils.ui_styles import get_app_css |
|
|
import torch |
|
|
import einops |
|
|
import safetensors.torch as sf |
|
|
import numpy as np |
|
|
|
|
|
from PIL import Image |
|
|
from diffusers import AutoencoderKLHunyuanVideo |
|
|
from transformers import LlamaTokenizerFast, CLIPTokenizer |
|
|
from diffusers_helper.hunyuan import ( |
|
|
encode_prompt_conds, |
|
|
vae_encode, |
|
|
vae_decode_fake, |
|
|
vae_decode, |
|
|
) |
|
|
from diffusers_helper.utils import ( |
|
|
save_bcthw_as_mp4, |
|
|
crop_or_pad_yield_mask, |
|
|
soft_append_bcthw, |
|
|
resize_and_center_crop, |
|
|
generate_timestamp, |
|
|
) |
|
|
from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan |
|
|
from diffusers_helper.memory import ( |
|
|
cpu, |
|
|
gpu, |
|
|
get_cuda_free_memory_gb, |
|
|
move_model_to_device_with_memory_preservation, |
|
|
offload_model_from_device_for_memory_preservation, |
|
|
fake_diffusers_current_device, |
|
|
DynamicSwapInstaller, |
|
|
unload_complete_models, |
|
|
load_model_as_complete, |
|
|
) |
|
|
from diffusers_helper.thread_utils import AsyncStream, async_run |
|
|
from diffusers_helper.gradio.progress_bar import make_progress_bar_html |
|
|
from transformers import SiglipImageProcessor, SiglipVisionModel |
|
|
from diffusers_helper.clip_vision import hf_clip_vision_encode |
|
|
from diffusers_helper.bucket_tools import find_nearest_bucket |
|
|
|
|
|
from eichi_utils.transformer_manager import TransformerManager |
|
|
from eichi_utils.text_encoder_manager import TextEncoderManager |
|
|
|
|
|
from eichi_utils.tensor_processing import ( |
|
|
process_tensor_chunks, |
|
|
print_tensor_info, |
|
|
ensure_tensor_properties, |
|
|
output_latent_to_image, |
|
|
fix_tensor_size, |
|
|
reorder_tensor, |
|
|
) |
|
|
|
|
|
free_mem_gb = get_cuda_free_memory_gb(gpu) |
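# Heuristic: with more than 100 GB of free VRAM, keep every model resident on
# the GPU; otherwise models are swapped on and off the device per stage.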
|
|
high_vram = free_mem_gb > 100 |
|
|
|
|
|
print(translate("Free VRAM {0} GB").format(free_mem_gb)) |
|
|
print(translate("High-VRAM Mode: {0}").format(high_vram)) |
|
|
|
|
|
|
|
|
from eichi_utils.model_downloader import ModelDownloader |
|
|
|
|
|
ModelDownloader().download_original() |
|
|
|
|
|
|
|
|
|
|
|
transformer_manager = TransformerManager( |
|
|
device=gpu, high_vram_mode=high_vram, use_f1_model=False |
|
|
) |
|
|
text_encoder_manager = TextEncoderManager(device=gpu, high_vram_mode=high_vram) |
|
|
|
|
|
try: |
|
|
tokenizer = LlamaTokenizerFast.from_pretrained( |
|
|
"hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer" |
|
|
) |
|
|
tokenizer_2 = CLIPTokenizer.from_pretrained( |
|
|
"hunyuanvideo-community/HunyuanVideo", subfolder="tokenizer_2" |
|
|
) |
|
|
vae = AutoencoderKLHunyuanVideo.from_pretrained( |
|
|
"hunyuanvideo-community/HunyuanVideo", |
|
|
subfolder="vae", |
|
|
torch_dtype=torch.float16, |
|
|
).cpu() |
|
|
|
|
|
|
|
|
if not text_encoder_manager.ensure_text_encoder_state(): |
|
|
raise Exception(translate("text_encoderとtext_encoder_2の初期化に失敗しました")) |
|
|
text_encoder, text_encoder_2 = text_encoder_manager.get_text_encoders() |
|
|
|
|
|
|
|
|
transformer_manager.ensure_download_models() |
|
|
transformer = ( |
|
|
transformer_manager.get_transformer() |
|
|
) |
|
|
|
|
|
|
|
|
feature_extractor = SiglipImageProcessor.from_pretrained( |
|
|
"lllyasviel/flux_redux_bfl", subfolder="feature_extractor" |
|
|
) |
|
|
image_encoder = SiglipVisionModel.from_pretrained( |
|
|
"lllyasviel/flux_redux_bfl", |
|
|
subfolder="image_encoder", |
|
|
torch_dtype=torch.float16, |
|
|
).cpu() |
|
|
except Exception as e: |
|
|
print(translate("モデル読み込みエラー: {0}").format(e)) |
|
|
print(translate("プログラムを終了します...")) |
|
|
sys.exit(1) |
|
|
|
|
|
vae.eval() |
|
|
image_encoder.eval() |
|
|
|
|
|
|
|
|
from eichi_utils import apply_vae_settings, load_vae_settings |
|
|
|
|
|
|
|
|
vae = apply_vae_settings(vae) |
|
|
|
|
|
|
|
|
vae_settings = load_vae_settings() |
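# In low-VRAM mode, sliced/tiled VAE decoding caps peak memory usage; it is
# skipped when the user has provided custom VAE settings that manage this.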
|
|
if not high_vram and not vae_settings.get("custom_vae_settings", False): |
|
|
vae.enable_slicing() |
|
|
vae.enable_tiling() |
|
|
|
|
|
vae.to(dtype=torch.float16) |
|
|
image_encoder.to(dtype=torch.float16) |
|
|
|
|
|
vae.requires_grad_(False) |
|
|
image_encoder.requires_grad_(False) |
|
|
|
|
|
if not high_vram: |
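    # DynamicSwapInstaller swaps transformer weights between CPU and GPU on
    # demand, so the full model never has to reside in VRAM at once.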
|
|
|
|
|
DynamicSwapInstaller.install_model( |
|
|
transformer, device=gpu |
|
|
) |
|
|
else: |
|
|
image_encoder.to(gpu) |
|
|
vae.to(gpu) |
|
|
|
|
|
|
|
|
batch_stopped = False |
|
|
queue_enabled = False |
|
|
queue_type = "prompt" |
|
|
prompt_queue_file_path = None |
|
|
|
|
image_queue_files = [] |
|
|
input_folder_name_value = "inputs" |
|
|
|
|
|
|
|
|
|
|
|
def get_image_queue_files(): |
|
|
global image_queue_files, input_folder_name_value |
|
|
input_dir = os.path.join( |
|
|
os.path.dirname(os.path.abspath(__file__)), input_folder_name_value |
|
|
) |
|
|
|
|
|
|
|
|
if not os.path.exists(input_dir): |
|
|
print( |
|
|
translate( |
|
|
"入力ディレクトリが存在しません: {0}(保存及び入力フォルダを開くボタンを押すと作成されます)" |
|
|
).format(input_dir) |
|
|
) |
|
|
return [] |
|
|
|
|
|
|
|
|
image_files = [] |
|
|
for file in sorted(os.listdir(input_dir)): |
|
|
if file.lower().endswith((".png", ".jpg", ".jpeg")): |
|
|
image_files.append(os.path.join(input_dir, file)) |
|
|
|
|
|
print( |
|
|
translate("入力ディレクトリから画像ファイル{0}個を読み込みました").format( |
|
|
len(image_files) |
|
|
) |
|
|
) |
|
|
image_queue_files = image_files |
|
|
return image_files |
|
|
|
|
|
|
|
|
stream = AsyncStream() |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
webui_folder = os.path.dirname(os.path.abspath(__file__)) |
|
|
|
|
|
|
|
|
settings_folder = os.path.join(webui_folder, "settings") |
|
|
os.makedirs(settings_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
initialize_settings() |
|
|
|
|
|
|
|
|
base_path = os.path.dirname(os.path.abspath(__file__)) |
|
|
|
|
|
|
|
|
app_settings = load_settings() |
|
|
output_folder_name = app_settings.get("output_folder", "outputs") |
|
|
print(translate("設定から出力フォルダを読み込み: {0}").format(output_folder_name)) |
|
|
|
|
|
|
|
|
input_folder_name_value = app_settings.get("input_folder", "inputs") |
|
|
print(translate("設定から入力フォルダを読み込み: {0}").format(input_folder_name_value)) |
|
|
|
|
|
|
|
|
outputs_folder = get_output_folder_path(output_folder_name) |
|
|
os.makedirs(outputs_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
input_dir = os.path.join( |
|
|
os.path.dirname(os.path.abspath(__file__)), input_folder_name_value |
|
|
) |
|
|
os.makedirs(input_dir, exist_ok=True) |
|
|
|
|
|
|
|
|
def is_combine_mode(combine_mode, check_mode):
    return COMBINE_MODE_OPTIONS[combine_mode] == check_mode
|
|
|
|
|
|
|
|
def remove_first_frame_from_tensor_latent(tensor_latent, trim_size): |
|
|
"""テンソルデータの先頭フレームを削除する""" |
|
|
|
|
|
tensor_latent_size = tensor_latent.shape[2] |
|
|
edited_latent = tensor_latent |
|
|
if tensor_latent_size > trim_size: |
|
|
|
|
|
if trim_size > 0: |
|
|
edited_latent = tensor_latent[:, :, trim_size:, :, :].clone() |
|
|
print( |
|
|
translate( |
|
|
"アップロードされたテンソルデータの先頭フレームを削除しました。削除数: {0}/{1}" |
|
|
).format(trim_size, tensor_latent_size) |
|
|
) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"警告: テンソルデータのフレーム数よりも、先頭フレーム削除数が大きく指定されているため、先頭フレーム削除は実施しません。" |
|
|
) |
|
|
) |
|
|
|
|
|
print( |
|
|
translate("テンソルデータ読み込み成功: shape={0}, dtype={1}").format( |
|
|
tensor_latent.shape, tensor_latent.dtype |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"テンソルデータ読み込み成功(先頭フレーム削除後): shape={0}, dtype={1}" |
|
|
).format(edited_latent.shape, edited_latent.dtype) |
|
|
) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate("Tensor data loaded successfully!"), |
|
|
make_progress_bar_html( |
|
|
10, translate("Tensor data loaded successfully!") |
|
|
), |
|
|
), |
|
|
) |
|
|
) |
|
|
return edited_latent |
|
|
|
|
|
|
|
|
@torch.no_grad() |
|
|
def worker( |
|
|
input_image, |
|
|
prompt, |
|
|
seed, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_preservation, |
|
|
use_teacache, |
|
|
mp4_crf=16, |
|
|
end_frame_strength=1.0, |
|
|
keep_section_videos=False, |
|
|
lora_files=None, |
|
|
lora_files2=None, |
|
|
lora_files3=None, |
|
|
lora_scales_text="0.8,0.8,0.8", |
|
|
output_dir=None, |
|
|
save_intermediate_frames=False, |
|
|
use_lora=False, |
|
|
lora_mode=None, |
|
|
lora_dropdown1=None, |
|
|
lora_dropdown2=None, |
|
|
lora_dropdown3=None, |
|
|
save_tensor_data=False, |
|
|
tensor_data_input=None, |
|
|
trim_start_latent_size=0, |
|
|
generation_latent_size=0, |
|
|
combine_mode=COMBINE_MODE_DEFAULT, |
|
|
fp8_optimization=False, |
|
|
batch_index=None, |
|
|
use_vae_cache=False, |
|
|
): |
|
|
|
|
|
global vae_cache_enabled, current_prompt |
|
|
|
|
|
print( |
|
|
f"worker関数でのVAEキャッシュ設定: パラメータ={use_vae_cache}, グローバル変数={vae_cache_enabled}" |
|
|
) |
|
|
|
|
|
|
|
|
current_prompt = prompt |
|
|
current_image = input_image |
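
    # Either the global UI toggle or the per-call argument may enable VAE
    # caching; whichever is truthy wins.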
|
|
|
|
|
|
|
|
use_vae_cache = vae_cache_enabled or use_vae_cache |
|
|
|
|
|
|
|
|
print(translate("[DEBUG] worker内 input_imageの型: {0}").format(type(input_image))) |
|
|
if isinstance(input_image, str): |
|
|
print( |
|
|
translate("[DEBUG] input_imageはファイルパスです: {0}").format(input_image) |
|
|
) |
|
|
has_any_image = input_image is not None |
|
|
else: |
|
|
print(translate("[DEBUG] input_imageはファイルパス以外です")) |
|
|
has_any_image = input_image is not None |
|
|
|
|
|
if not has_any_image: |
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"❗️ 画像が選択されていません\n生成を開始する前に「Image」欄または表示されている最後のキーフレーム画像に画像をアップロードしてください。これはあまねく叡智の始発点となる重要な画像です。" |
|
|
), |
|
|
make_progress_bar_html(0, translate("エラー")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
stream.output_queue.push(("end", None)) |
|
|
return |
|
|
|
|
|
|
|
|
if input_image is None: |
|
|
print(translate("[ERROR] 入力画像は必須です")) |
|
|
raise Exception(translate("入力画像は必須です")) |
|
|
|
|
|
|
|
|
global outputs_folder |
|
|
global output_folder_name |
|
|
if output_dir and output_dir.strip(): |
|
|
|
|
|
outputs_folder = get_output_folder_path(output_dir) |
|
|
print(translate("出力フォルダを設定: {0}").format(outputs_folder)) |
|
|
|
|
|
|
|
|
if output_dir != output_folder_name: |
|
|
settings = load_settings() |
|
|
settings["output_folder"] = output_dir |
|
|
if save_settings(settings): |
|
|
output_folder_name = output_dir |
|
|
print( |
|
|
translate("出力フォルダ設定を保存しました: {0}").format(output_dir) |
|
|
) |
|
|
else: |
|
|
|
|
|
outputs_folder = get_output_folder_path(output_folder_name) |
|
|
print(translate("デフォルト出力フォルダを使用: {0}").format(outputs_folder)) |
|
|
|
|
|
|
|
|
os.makedirs(outputs_folder, exist_ok=True) |
|
|
|
|
|
|
|
|
process_start_time = time.time() |
|
|
|
|
|
|
|
|
global transformer, text_encoder, text_encoder_2 |
|
|
|
|
|
|
|
|
if not text_encoder_manager.ensure_text_encoder_state(): |
|
|
raise Exception(translate("text_encoderとtext_encoder_2の初期化に失敗しました")) |
|
|
text_encoder, text_encoder_2 = text_encoder_manager.get_text_encoders() |
|
|
|
|
|
|
|
|
batch_suffix = f"_batch{batch_index + 1}" if batch_index is not None else "" |
|
|
job_id = generate_timestamp() + batch_suffix |
|
|
|
|
|
stream.output_queue.push( |
|
|
("progress", (None, "", make_progress_bar_html(0, "Starting ..."))) |
|
|
) |
|
|
|
|
|
try: |
|
|
|
|
|
if not high_vram: |
|
|
|
|
|
unload_complete_models(image_encoder, vae) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
(None, "", make_progress_bar_html(0, translate("Text encoding ..."))), |
|
|
) |
|
|
) |
|
|
|
|
|
if not high_vram: |
|
|
fake_diffusers_current_device( |
|
|
text_encoder, gpu |
|
|
) |
|
|
load_model_as_complete(text_encoder_2, target_device=gpu) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
llama_vec, clip_l_pooler = encode_prompt_conds( |
|
|
current_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2 |
|
|
) |
|
|
|
|
|
if cfg == 1: |
|
|
llama_vec_n, clip_l_pooler_n = ( |
|
|
torch.zeros_like(llama_vec), |
|
|
torch.zeros_like(clip_l_pooler), |
|
|
) |
|
|
else: |
|
|
|
|
|
llama_vec_n, clip_l_pooler_n = encode_prompt_conds( |
|
|
"", text_encoder, text_encoder_2, tokenizer, tokenizer_2 |
|
|
) |
|
|
|
|
|
llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512) |
|
|
llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask( |
|
|
llama_vec_n, length=512 |
|
|
) |
|
|
|
|
|
|
|
|
if not high_vram: |
|
|
text_encoder, text_encoder_2 = None, None |
|
|
text_encoder_manager.dispose_text_encoders() |
|
|
|
|
|
|
|
|
uploaded_tensor_latents = None |
|
|
if tensor_data_input is not None: |
|
|
try: |
|
|
tensor_path = tensor_data_input.name |
|
|
print( |
|
|
translate("テンソルデータを読み込み: {0}").format( |
|
|
os.path.basename(tensor_path) |
|
|
) |
|
|
) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
"", |
|
|
make_progress_bar_html( |
|
|
0, translate("Loading tensor data ...") |
|
|
), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
tensor_dict = sf.load_file(tensor_path) |
|
|
|
|
|
|
|
|
print(translate("テンソルデータの内容:")) |
|
|
for key, tensor in tensor_dict.items(): |
|
|
print(f" - {key}: shape={tensor.shape}, dtype={tensor.dtype}") |
|
|
|
|
|
uploaded_tensor_edit_latents = None |
|
|
|
|
|
if "history_latents" in tensor_dict: |
|
|
|
|
|
uploaded_tensor_latents = tensor_dict["history_latents"] |
|
|
|
|
|
preview_tensor = uploaded_tensor_latents |
|
|
preview_tensor = vae_decode_fake(preview_tensor) |
|
|
|
|
|
preview_tensor = ( |
|
|
(preview_tensor * 255.0) |
|
|
.detach() |
|
|
.cpu() |
|
|
.numpy() |
|
|
.clip(0, 255) |
|
|
.astype(np.uint8) |
|
|
) |
|
|
preview_tensor = einops.rearrange( |
|
|
preview_tensor, "b c t h w -> (b h) (t w) c" |
|
|
) |
|
|
|
|
|
desc = "テンソルデータを解析中です ..." |
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
preview_tensor, |
|
|
desc, |
|
|
"", |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
uploaded_tensor_edit_latents = ( |
|
|
remove_first_frame_from_tensor_latent( |
|
|
uploaded_tensor_latents, trim_start_latent_size |
|
|
) |
|
|
) |
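
                # Latent H/W are 1/8 of the pixel resolution for this VAE, so
                # multiplying by 8 recovers the image size to generate at.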
|
|
|
|
|
|
|
|
adjust_height = uploaded_tensor_latents.shape[3] * 8 |
|
|
adjust_width = uploaded_tensor_latents.shape[4] * 8 |
|
|
                else:
                    print(
                        translate(
                            "異常: テンソルデータに 'history_latents' キーが見つかりません"
                        )
                    )
                    raise Exception(
                        translate(
                            "異常: テンソルデータに 'history_latents' キーが見つかりません"
                        )
                    )
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ読み込みエラー: {0}").format(e)) |
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
raise Exception("テンソルデータ読み込みエラー") |
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
"", |
|
|
make_progress_bar_html(0, translate("Image processing ...")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
def preprocess_image(img_path_or_array, resolution=640): |
|
|
"""Pathまたは画像配列を処理して適切なサイズに変換する""" |
|
|
print( |
|
|
translate("[DEBUG] preprocess_image: img_path_or_array型 = {0}").format( |
|
|
type(img_path_or_array) |
|
|
) |
|
|
) |
|
|
|
|
|
if img_path_or_array is None: |
|
|
|
|
|
img = np.zeros((resolution, resolution, 3), dtype=np.uint8) |
|
|
height = width = resolution |
|
|
return img, img, height, width |
|
|
|
|
|
|
|
|
if isinstance(img_path_or_array, torch.Tensor): |
|
|
img_path_or_array = img_path_or_array.cpu().numpy() |
|
|
|
|
|
|
|
|
if isinstance(img_path_or_array, str) and os.path.exists(img_path_or_array): |
|
|
|
|
|
img = np.array(Image.open(img_path_or_array).convert("RGB")) |
|
|
else: |
|
|
|
|
|
img = img_path_or_array |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
img_np = resize_and_center_crop( |
|
|
img, target_width=adjust_width, target_height=adjust_height |
|
|
) |
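
            # Scale uint8 RGB into [-1, 1] and reorder HWC -> (B=1, C, T=1, H, W)
            # to match the BCTHW layout the rest of the pipeline expects.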
|
|
img_pt = torch.from_numpy(img_np).float() / 127.5 - 1 |
|
|
img_pt = img_pt.permute(2, 0, 1)[None, :, None] |
|
|
return img_np, img_pt, adjust_height, adjust_width |
|
|
|
|
|
input_image_np, input_image_pt, height, width = preprocess_image(current_image) |
|
|
|
|
|
|
|
initial_image_path = os.path.join(outputs_folder, f"{job_id}.png") |
|
|
Image.fromarray(input_image_np).save(initial_image_path) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
metadata = {PROMPT_KEY: prompt, SEED_KEY: seed} |
|
|
|
|
|
embed_metadata_to_png(initial_image_path, metadata) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
(None, "", make_progress_bar_html(0, translate("VAE encoding ..."))), |
|
|
) |
|
|
) |
|
|
|
|
|
if not high_vram: |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
|
|
|
tensor_info = translate( |
|
|
"テンソルデータ ({0}フレーム) を検出しました。動画生成後に結合します。" |
|
|
).format(uploaded_tensor_latents.shape[2]) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
tensor_info, |
|
|
make_progress_bar_html(10, translate("テンソルデータを結合")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
input_image_latent = vae_encode(input_image_pt, vae) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
"", |
|
|
make_progress_bar_html(0, translate("CLIP Vision encoding ...")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
if not high_vram: |
|
|
load_model_as_complete(image_encoder, target_device=gpu) |
|
|
|
|
|
image_encoder_output = hf_clip_vision_encode( |
|
|
input_image_np, feature_extractor, image_encoder |
|
|
) |
|
|
image_encoder_last_hidden_state = image_encoder_output.last_hidden_state |
|
|
|
|
|
|
|
|
|
|
|
llama_vec = llama_vec.to(transformer.dtype) |
|
|
llama_vec_n = llama_vec_n.to(transformer.dtype) |
|
|
clip_l_pooler = clip_l_pooler.to(transformer.dtype) |
|
|
clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype) |
|
|
image_encoder_last_hidden_state = image_encoder_last_hidden_state.to( |
|
|
transformer.dtype |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
(None, "", make_progress_bar_html(0, translate("Start sampling ..."))), |
|
|
) |
|
|
) |
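
        # A CPU-side generator keeps sampling deterministic for a given seed,
        # independent of which GPU is used.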
|
|
|
|
|
rnd = torch.Generator("cpu").manual_seed(seed) |
|
|
fix_uploaded_tensor_pixels = None |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
original_use_lora = use_lora |
|
|
print(f"[DEBUG] UI設定のuse_loraフラグの値: {original_use_lora}") |
|
|
|
|
|
|
|
|
if "PYTORCH_CUDA_ALLOC_CONF" not in os.environ: |
|
|
old_env = os.environ.get("PYTORCH_CUDA_ALLOC_CONF", "") |
|
|
os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "expandable_segments:True" |
|
|
print( |
|
|
translate( |
|
|
"CUDA環境変数設定: PYTORCH_CUDA_ALLOC_CONF=expandable_segments:True (元の値: {0})" |
|
|
).format(old_env) |
|
|
) |
|
|
|
|
|
|
|
|
current_lora_paths = [] |
|
|
current_lora_scales = [] |
|
|
|
|
|
|
|
|
if lora_mode == translate("ディレクトリから選択") and has_lora_support: |
|
|
|
|
|
has_selected_lora = False |
|
|
for dropdown in [lora_dropdown1, lora_dropdown2, lora_dropdown3]: |
|
|
dropdown_value = ( |
|
|
dropdown.value if hasattr(dropdown, "value") else dropdown |
|
|
) |
|
|
|
|
|
|
|
|
if ( |
|
|
dropdown_value == 0 |
|
|
or dropdown_value == "0" |
|
|
or dropdown_value == 0.0 |
|
|
): |
|
|
|
|
|
dropdown_value = translate("なし") |
|
|
|
|
|
|
|
|
if not isinstance(dropdown_value, str) and dropdown_value is not None: |
|
|
dropdown_value = str(dropdown_value) |
|
|
|
|
|
if dropdown_value and dropdown_value != translate("なし"): |
|
|
has_selected_lora = True |
|
|
break |
|
|
|
|
|
|
|
|
if has_selected_lora: |
|
|
use_lora = True |
|
|
print( |
|
|
translate( |
|
|
"[INFO] workerプロセス: ディレクトリでLoRAが選択されているため、LoRA使用を有効にしました" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
elif not use_lora and has_lora_support: |
|
|
if ( |
|
|
(lora_files is not None and hasattr(lora_files, "name")) |
|
|
or (lora_files2 is not None and hasattr(lora_files2, "name")) |
|
|
or (lora_files3 is not None and hasattr(lora_files3, "name")) |
|
|
): |
|
|
use_lora = True |
|
|
print( |
|
|
translate( |
|
|
"[INFO] workerプロセス: LoRAファイルが選択されているため、LoRA使用を有効にしました" |
|
|
) |
|
|
) |
|
|
|
|
|
if use_lora and has_lora_support: |
|
|
|
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lora") |
|
|
|
|
|
|
|
|
|
|
|
lora_mode_value = lora_mode |
|
|
if hasattr(lora_mode, "value"): |
|
|
try: |
|
|
|
|
|
temp_value = lora_mode.value |
|
|
if temp_value and isinstance(temp_value, str): |
|
|
lora_mode_value = temp_value |
|
|
                except Exception:
|
|
|
|
|
pass |
|
|
print( |
|
|
translate("[DEBUG] lora_mode_value 型: {0}, 値: {1}").format( |
|
|
type(lora_mode_value).__name__, lora_mode_value |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
print(translate("[DEBUG] worker内のLoRAドロップダウン値(元の値):")) |
|
|
print( |
|
|
translate(" - lora_dropdown1: {0}, 型: {1}").format( |
|
|
lora_dropdown1, type(lora_dropdown1).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - lora_dropdown2: {0}, 型: {1}").format( |
|
|
lora_dropdown2, type(lora_dropdown2).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - lora_dropdown3: {0}, 型: {1}").format( |
|
|
lora_dropdown3, type(lora_dropdown3).__name__ |
|
|
) |
|
|
) |
|
|
if lora_mode_value and lora_mode_value == translate("ディレクトリから選択"): |
|
|
|
|
|
print(translate("[INFO] LoRA読み込み方式: ディレクトリから選択")) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if lora_dropdown2 == 0: |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] lora_dropdown2が数値0になっています。特別処理を実行します" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
lora_dir = os.path.join( |
|
|
os.path.dirname(os.path.abspath(__file__)), "lora" |
|
|
) |
|
|
if os.path.exists(lora_dir): |
|
|
lora_file_listing = [] |
|
|
for filename in os.listdir(lora_dir): |
|
|
if filename.endswith((".safetensors", ".pt", ".bin")): |
|
|
lora_file_listing.append(filename) |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] ディレクトリ内LoRAファイル数: {0}" |
|
|
).format(len(lora_file_listing)) |
|
|
) |
|
|
|
|
|
original_dropdowns = { |
|
|
"LoRA1": lora_dropdown1, |
|
|
"LoRA2": lora_dropdown2, |
|
|
"LoRA3": lora_dropdown3, |
|
|
} |
|
|
|
|
|
print(translate("[DEBUG] ドロップダウン詳細オリジナル値:")) |
|
|
print( |
|
|
f" lora_dropdown1 = {lora_dropdown1}, 型: {type(lora_dropdown1).__name__}" |
|
|
) |
|
|
print( |
|
|
f" lora_dropdown2 = {lora_dropdown2}, 型: {type(lora_dropdown2).__name__}" |
|
|
) |
|
|
print( |
|
|
f" lora_dropdown3 = {lora_dropdown3}, 型: {type(lora_dropdown3).__name__}" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print( |
|
|
translate("[DEBUG] 詳細ログ: LoRA2の値={0!r}, 型={1}").format( |
|
|
lora_dropdown2, type(lora_dropdown2).__name__ |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if lora_dropdown2 == 0: |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] LoRA2の値が0になっているため、詳細な状態を確認" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
lora_dir = os.path.join( |
|
|
os.path.dirname(os.path.abspath(__file__)), "lora" |
|
|
) |
|
|
if os.path.exists(lora_dir): |
|
|
print(translate("[DEBUG] LoRAディレクトリ内容:")) |
|
|
directory_files = [] |
|
|
for filename in os.listdir(lora_dir): |
|
|
if filename.endswith((".safetensors", ".pt", ".bin")): |
|
|
directory_files.append(filename) |
|
|
|
|
|
|
|
|
for i, file in enumerate(directory_files[:5]): |
|
|
print(f" {i + 1}. {file}") |
|
|
|
|
|
dropdown_direct_values = { |
|
|
"dropdown1": original_dropdowns["LoRA1"], |
|
|
"dropdown2": original_dropdowns["LoRA2"], |
|
|
"dropdown3": original_dropdowns["LoRA3"], |
|
|
} |
|
|
|
|
|
|
|
|
for ( |
|
|
dropdown_name, |
|
|
dropdown_direct_value, |
|
|
) in dropdown_direct_values.items(): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] ドロップダウン{0}処理直接使用: 値={1}, 型={2}, 数値として表示={3}" |
|
|
).format( |
|
|
dropdown_name, |
|
|
repr(dropdown_direct_value), |
|
|
type(dropdown_direct_value).__name__, |
|
|
"0" |
|
|
if dropdown_direct_value == 0 |
|
|
or dropdown_direct_value == 0.0 |
|
|
else "非0またはNone", |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if dropdown_name == "dropdown2" and dropdown_direct_value == 0: |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] dropdown2の特別処理: 数値0が検出されました。元の値: {0}" |
|
|
).format(lora_dropdown2) |
|
|
) |
|
|
if ( |
|
|
isinstance(lora_dropdown2, str) |
|
|
and lora_dropdown2 != "0" |
|
|
and lora_dropdown2 != translate("なし") |
|
|
): |
|
|
|
|
|
dropdown_direct_value = lora_dropdown2 |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] dropdown2の値を元の引数から復元: {0}" |
|
|
).format(dropdown_direct_value) |
|
|
) |
|
|
|
|
|
|
|
|
dropdown_value = dropdown_direct_value |
|
|
|
|
|
|
|
|
|
|
|
if ( |
|
|
dropdown_value == 0 |
|
|
or dropdown_value == 0.0 |
|
|
or dropdown_value == "0" |
|
|
): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] {name}の値が数値0として検出されました。'なし'として扱います" |
|
|
).format(name=dropdown_name) |
|
|
) |
|
|
dropdown_value = translate("なし") |
|
|
|
|
|
                    elif dropdown_value is not None and not isinstance(
                        dropdown_value, str
                    ):
                        print(
                            translate(
                                "[DEBUG] {name}の前処理: 非文字列値が検出されたため文字列変換を実施: 値={value}, 型={type}"
                            ).format(
                                name=dropdown_name,
                                value=dropdown_value,
                                type=type(dropdown_value).__name__,
                            )
                        )
                        dropdown_value = str(dropdown_value)
|
|
|
|
|
|
|
|
if dropdown_value is not None and not isinstance( |
|
|
dropdown_value, str |
|
|
): |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] {name}の値のタイプが依然として非文字列です: {type}" |
|
|
).format( |
|
|
name=dropdown_name, type=type(dropdown_value).__name__ |
|
|
) |
|
|
) |
|
|
dropdown_value = str(dropdown_value) |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] {name}の最終値 (loading前): 値={value!r}, 型={type}, 'なし'と比較={is_none}" |
|
|
).format( |
|
|
name=dropdown_name, |
|
|
value=dropdown_value, |
|
|
type=type(dropdown_value).__name__, |
|
|
is_none="True" |
|
|
if dropdown_value == translate("なし") |
|
|
else "False", |
|
|
) |
|
|
) |
|
|
|
|
|
if dropdown_value and dropdown_value != translate("なし"): |
|
|
lora_path = os.path.join(lora_dir, dropdown_value) |
|
|
print( |
|
|
translate("[DEBUG] {name}のロード試行: パス={path}").format( |
|
|
name=dropdown_name, path=lora_path |
|
|
) |
|
|
) |
|
|
if os.path.exists(lora_path): |
|
|
current_lora_paths.append(lora_path) |
|
|
print( |
|
|
translate("[INFO] {name}を選択: {path}").format( |
|
|
name=dropdown_name, path=lora_path |
|
|
) |
|
|
) |
|
|
else: |
|
|
|
|
|
if os.path.dirname( |
|
|
lora_path |
|
|
) == lora_dir and not os.path.isabs(dropdown_value): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[WARN] 選択された{name}が見つかりません: {file}" |
|
|
).format(name=dropdown_name, file=dropdown_value) |
|
|
) |
|
|
else: |
|
|
|
|
|
lora_path_retry = os.path.join( |
|
|
lora_dir, os.path.basename(str(dropdown_value)) |
|
|
) |
|
|
print( |
|
|
translate("[DEBUG] {name}を再試行: {path}").format( |
|
|
name=dropdown_name, path=lora_path_retry |
|
|
) |
|
|
) |
|
|
if os.path.exists(lora_path_retry): |
|
|
current_lora_paths.append(lora_path_retry) |
|
|
print( |
|
|
translate( |
|
|
"[INFO] {name}を選択 (パス修正後): {path}" |
|
|
).format( |
|
|
name=dropdown_name, path=lora_path_retry |
|
|
) |
|
|
) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] 選択された{name}が見つかりません: {file}" |
|
|
).format( |
|
|
name=dropdown_name, file=dropdown_value |
|
|
) |
|
|
) |
|
|
else: |
|
|
|
|
|
print(translate("[INFO] LoRA読み込み方式: ファイルアップロード")) |
|
|
|
|
|
|
|
|
if lora_files is not None: |
|
|
|
|
|
print( |
|
|
f"[DEBUG] lora_filesの型: {type(lora_files)}, 値: {lora_files}" |
|
|
) |
|
|
|
|
|
if isinstance(lora_files, list): |
|
|
|
|
|
|
|
|
for file in lora_files: |
|
|
if hasattr(file, "name") and file.name: |
|
|
current_lora_paths.append(file.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル1のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
else: |
|
|
|
|
|
|
|
|
if hasattr(lora_files, "name") and lora_files.name: |
|
|
current_lora_paths.append(lora_files.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] 1つ目のLoRAファイルは無効か選択されていません" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if lora_files2 is not None: |
|
|
|
|
|
print( |
|
|
f"[DEBUG] lora_files2の型: {type(lora_files2)}, 値: {lora_files2}" |
|
|
) |
|
|
|
|
|
if isinstance(lora_files2, list): |
|
|
|
|
|
|
|
|
for file in lora_files2: |
|
|
if hasattr(file, "name") and file.name: |
|
|
current_lora_paths.append(file.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル2のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
else: |
|
|
|
|
|
|
|
|
if hasattr(lora_files2, "name") and lora_files2.name: |
|
|
current_lora_paths.append(lora_files2.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] 2つ目のLoRAファイルは無効か選択されていません" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if lora_files3 is not None: |
|
|
|
|
|
print( |
|
|
f"[DEBUG] lora_files3の型: {type(lora_files3)}, 値: {lora_files3}" |
|
|
) |
|
|
|
|
|
if isinstance(lora_files3, list): |
|
|
|
|
|
|
|
|
for file in lora_files3: |
|
|
if hasattr(file, "name") and file.name: |
|
|
current_lora_paths.append(file.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル3のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
else: |
|
|
|
|
|
|
|
|
if hasattr(lora_files3, "name") and lora_files3.name: |
|
|
current_lora_paths.append(lora_files3.name) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] 3つ目のLoRAファイルは無効か選択されていません" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if current_lora_paths: |
|
|
try: |
|
|
scales_text = lora_scales_text.strip() |
|
|
if scales_text: |
|
|
|
|
|
scales = [ |
|
|
float(scale.strip()) for scale in scales_text.split(",") |
|
|
] |
|
|
current_lora_scales = scales |
|
|
else: |
|
|
|
|
|
current_lora_scales = [0.8] * len(current_lora_paths) |
|
|
except Exception as e: |
|
|
print(translate("LoRAスケール解析エラー: {0}").format(e)) |
|
|
print(translate("デフォルトスケール 0.8 を使用します")) |
|
|
current_lora_scales = [0.8] * len(current_lora_paths) |
|
|
|
|
|
|
|
|
if len(current_lora_scales) < len(current_lora_paths): |
|
|
|
|
|
current_lora_scales.extend( |
|
|
[0.8] * (len(current_lora_paths) - len(current_lora_scales)) |
|
|
) |
|
|
elif len(current_lora_scales) > len(current_lora_paths): |
|
|
|
|
|
current_lora_scales = current_lora_scales[: len(current_lora_paths)] |
|
|
|
|
|
|
|
|
if original_use_lora: |
|
|
use_lora = True |
|
|
print( |
|
|
translate( |
|
|
"[INFO] UIでLoRA使用が有効化されているため、LoRA使用を有効にします" |
|
|
) |
|
|
) |
|
|
|
|
|
print(f"[DEBUG] 最終的なuse_loraフラグ: {use_lora}") |
|
|
|
|
|
|
|
|
transformer_manager.set_next_settings( |
|
|
lora_paths=current_lora_paths, |
|
|
lora_scales=current_lora_scales, |
|
|
fp8_enabled=fp8_optimization, |
|
|
high_vram_mode=high_vram, |
|
|
force_dict_split=True, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print(translate("\ntransformer状態チェック...")) |
|
|
try: |
|
|
|
|
|
if not transformer_manager.ensure_transformer_state(): |
|
|
raise Exception(translate("transformer状態の確認に失敗しました")) |
|
|
|
|
|
|
|
|
transformer = transformer_manager.get_transformer() |
|
|
print(translate("transformer状態チェック完了")) |
|
|
except Exception as e: |
|
|
print(translate("transformer状態チェックエラー: {0}").format(e)) |
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
raise e |
|
|
|
|
|
if stream.input_queue.top() == "end": |
|
|
stream.output_queue.push(("end", None)) |
|
|
return |
|
|
|
|
|
combined_output_filename = None |
|
|
|
|
|
|
|
|
try: |
|
|
if not high_vram: |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
|
|
|
fix_uploaded_tensor_latent = fix_tensor_size(uploaded_tensor_edit_latents) |
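
            # Decode the uploaded latents to pixel space in chunks so the whole
            # clip never has to fit in VRAM at once.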
|
|
|
|
|
|
|
|
uploaded_tensor_edit_pixels, _ = process_tensor_chunks( |
|
|
tensor=uploaded_tensor_edit_latents, |
|
|
frames=uploaded_tensor_edit_latents.shape[2], |
|
|
use_vae_cache=use_vae_cache, |
|
|
job_id=job_id, |
|
|
outputs_folder=outputs_folder, |
|
|
mp4_crf=mp4_crf, |
|
|
stream=stream, |
|
|
vae=vae, |
|
|
) |
|
|
|
|
|
if is_combine_mode(combine_mode, COMBINE_MODE.FIRST): |
|
|
|
|
|
uploaded_tensor_one_latent = fix_uploaded_tensor_latent[ |
|
|
:, :, 0, :, : |
|
|
].clone() |
|
|
else: |
|
|
|
|
|
uploaded_tensor_one_latent = fix_uploaded_tensor_latent[ |
|
|
:, :, -1, :, : |
|
|
].clone() |
|
|
|
|
|
fix_uploaded_tensor_latent = reorder_tensor( |
|
|
fix_uploaded_tensor_latent, True |
|
|
) |
|
|
|
|
|
|
|
|
first_image_latent = input_image_latent |
|
|
last_image_latent = uploaded_tensor_one_latent.unsqueeze(2) |
|
|
|
|
|
|
|
|
output_latent_to_image( |
|
|
first_image_latent, |
|
|
os.path.join(outputs_folder, f"{job_id}_generation_start.png"), |
|
|
vae, |
|
|
use_vae_cache, |
|
|
) |
|
|
output_latent_to_image( |
|
|
last_image_latent, |
|
|
os.path.join(outputs_folder, f"{job_id}_generation_end.png"), |
|
|
vae, |
|
|
use_vae_cache, |
|
|
) |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models() |
|
|
|
|
|
preserved_memory = ( |
|
|
float(gpu_memory_preservation) |
|
|
if gpu_memory_preservation is not None |
|
|
else 6.0 |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"Setting transformer memory preservation to: {0} GB" |
|
|
).format(preserved_memory) |
|
|
) |
|
|
move_model_to_device_with_memory_preservation( |
|
|
transformer, target_device=gpu, preserved_memory_gb=preserved_memory |
|
|
) |
|
|
|
|
|
if use_teacache: |
|
|
transformer.initialize_teacache(enable_teacache=True, num_steps=steps) |
|
|
else: |
|
|
transformer.initialize_teacache(enable_teacache=False) |
|
|
|
|
|
|
|
|
uploaded_frames = uploaded_tensor_edit_latents.shape[2] |
|
|
|
|
|
print( |
|
|
translate("新規に動画を生成します。結合箇所: {direction}").format( |
|
|
direction=get_combine_mode(combine_mode), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"新規に動画を生成します。結合箇所: {direction}" |
|
|
).format( |
|
|
direction=get_combine_mode(combine_mode), |
|
|
), |
|
|
make_progress_bar_html(80, translate("新規動画生成")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
def callback_generation(d): |
|
|
preview = d["denoised"] |
|
|
preview = vae_decode_fake(preview) |
|
|
|
|
|
preview = ( |
|
|
(preview * 255.0) |
|
|
.detach() |
|
|
.cpu() |
|
|
.numpy() |
|
|
.clip(0, 255) |
|
|
.astype(np.uint8) |
|
|
) |
|
|
preview = einops.rearrange(preview, "b c t h w -> (b h) (t w) c") |
|
|
|
|
|
if stream.input_queue.top() == "end": |
|
|
stream.output_queue.push(("end", None)) |
|
|
raise KeyboardInterrupt("User ends the task.") |
|
|
|
|
|
current_step = d["i"] + 1 |
|
|
percentage = int(100.0 * current_step / steps) |
|
|
hint = translate("Sampling {0}/{1}").format(current_step, steps) |
|
|
desc = "新規動画を生成中です ..." |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
preview, |
|
|
desc, |
|
|
make_progress_bar_html(percentage, hint), |
|
|
), |
|
|
) |
|
|
) |
|
|
return |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
effective_window_size = generation_latent_size |
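
            # The temporal VAE packs 4 pixel frames per latent frame (the first
            # latent decodes to a single frame), hence 4*T - 3 output frames.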
|
|
|
|
|
|
|
|
generation_num_frames = int(effective_window_size * 4 - 3) |
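
            # Index layout [1, W, 1, 2, 16]: one clean "pre" anchor latent, the W
            # latents being denoised, one clean "post" anchor, then 2x- and
            # 4x-downsampled history slots that serve as longer-range context.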
|
|
|
|
|
|
|
|
indices = torch.arange( |
|
|
0, sum([1, effective_window_size, 1, 2, 16]) |
|
|
).unsqueeze(0) |
|
|
( |
|
|
clean_latent_indices_pre, |
|
|
latent_indices, |
|
|
clean_latent_indices_post, |
|
|
clean_latent_2x_indices, |
|
|
clean_latent_4x_indices, |
|
|
) = indices.split([1, effective_window_size, 1, 2, 16], dim=1) |
|
|
clean_latent_indices = torch.cat( |
|
|
[clean_latent_indices_pre, clean_latent_indices_post], dim=1 |
|
|
) |
|
|
clean_latents_post, clean_latents_2x, clean_latents_4x = ( |
|
|
fix_uploaded_tensor_latent[:, :, : 1 + 2 + 16, :, :].split( |
|
|
[1, 2, 16], dim=2 |
|
|
) |
|
|
) |
|
|
first_image_latent = first_image_latent.to(clean_latents_post.device) |
|
|
last_image_latent = last_image_latent.to(clean_latents_post.device) |
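            # The input image and the tensor's edge frame become the pre/post
            # clean anchors, so sampling bridges between these two stills.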
|
|
clean_latents = torch.cat([first_image_latent, last_image_latent], dim=2) |
|
|
|
|
|
|
|
|
generated_latents = sample_hunyuan( |
|
|
transformer=transformer, |
|
|
sampler="unipc", |
|
|
width=width, |
|
|
height=height, |
|
|
frames=generation_num_frames, |
|
|
real_guidance_scale=cfg, |
|
|
distilled_guidance_scale=gs, |
|
|
guidance_rescale=rs, |
|
|
|
|
|
num_inference_steps=steps, |
|
|
generator=rnd, |
|
|
prompt_embeds=llama_vec, |
|
|
prompt_embeds_mask=llama_attention_mask, |
|
|
prompt_poolers=clip_l_pooler, |
|
|
negative_prompt_embeds=llama_vec_n, |
|
|
negative_prompt_embeds_mask=llama_attention_mask_n, |
|
|
negative_prompt_poolers=clip_l_pooler_n, |
|
|
device=gpu, |
|
|
dtype=torch.bfloat16, |
|
|
image_embeddings=image_encoder_last_hidden_state, |
|
|
latent_indices=latent_indices, |
|
|
clean_latents=clean_latents, |
|
|
clean_latent_indices=clean_latent_indices, |
|
|
clean_latents_2x=clean_latents_2x, |
|
|
clean_latent_2x_indices=clean_latent_2x_indices, |
|
|
clean_latents_4x=clean_latents_4x, |
|
|
clean_latent_4x_indices=clean_latent_4x_indices, |
|
|
callback=callback_generation, |
|
|
) |
|
|
|
|
|
|
|
|
device = uploaded_tensor_edit_latents.device |
|
|
generated_latents = generated_latents.to(device) |
|
|
|
|
|
if not high_vram: |
|
|
|
|
|
preserved_memory_offload = 8.0 |
|
|
print( |
|
|
translate( |
|
|
"Offloading transformer with memory preservation: {0} GB" |
|
|
).format(preserved_memory_offload) |
|
|
) |
|
|
offload_model_from_device_for_memory_preservation( |
|
|
transformer, |
|
|
target_device=gpu, |
|
|
preserved_memory_gb=preserved_memory_offload, |
|
|
) |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
reverse = False |
|
|
if is_combine_mode(combine_mode, COMBINE_MODE.LAST): |
|
|
reverse = True |
|
|
|
|
|
|
|
|
generated_pixels, _ = process_tensor_chunks( |
|
|
tensor=generated_latents, |
|
|
frames=generated_latents.shape[2], |
|
|
use_vae_cache=use_vae_cache, |
|
|
job_id=job_id, |
|
|
outputs_folder=outputs_folder, |
|
|
mp4_crf=mp4_crf, |
|
|
stream=stream, |
|
|
vae=vae, |
|
|
reverse=reverse, |
|
|
) |
|
|
|
|
|
|
|
|
generated_pixels = reorder_tensor( |
|
|
generated_pixels, |
|
|
is_combine_mode(combine_mode, COMBINE_MODE.LAST), |
|
|
) |
|
|
|
|
|
|
|
|
generation_output_filename = os.path.join( |
|
|
outputs_folder, f"{job_id}_generation.mp4" |
|
|
) |
|
|
save_bcthw_as_mp4( |
|
|
generated_pixels, |
|
|
generation_output_filename, |
|
|
fps=30, |
|
|
crf=mp4_crf, |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"生成データの動画を保存しました: {generation_output_filename}" |
|
|
).format(generation_output_filename=generation_output_filename) |
|
|
) |
|
|
|
|
|
generated_frames = generated_pixels.shape[2] |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"テンソルデータと生成データを結合します。結合箇所: {direction}, アップロードされたフレーム数 = {uploaded_frames}, 生成動画のフレーム数 = {generated_frames}" |
|
|
).format( |
|
|
direction=get_combine_mode(combine_mode), |
|
|
uploaded_frames=uploaded_frames, |
|
|
generated_frames=generated_frames, |
|
|
) |
|
|
) |
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"テンソルデータと生成データを結合します。結合箇所: {direction}, アップロードされたフレーム数 = {uploaded_frames}, 生成動画のフレーム数 = {generated_frames}" |
|
|
).format( |
|
|
direction=get_combine_mode(combine_mode), |
|
|
uploaded_frames=uploaded_frames, |
|
|
generated_frames=generated_frames, |
|
|
), |
|
|
make_progress_bar_html(90, translate("テンソルデータ結合準備")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
overlapped_frames = 0 |
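            # With zero overlapped frames, soft_append_bcthw reduces to a plain
            # concatenation along the time axis.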
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
if is_combine_mode(combine_mode, COMBINE_MODE.FIRST): |
|
|
combined_pixels = soft_append_bcthw( |
|
|
generated_pixels.cpu(), |
|
|
uploaded_tensor_edit_pixels.cpu(), |
|
|
overlapped_frames, |
|
|
) |
|
|
else: |
|
|
combined_pixels = soft_append_bcthw( |
|
|
uploaded_tensor_edit_pixels.cpu(), |
|
|
generated_pixels.cpu(), |
|
|
overlapped_frames, |
|
|
) |
|
|
del uploaded_tensor_edit_pixels |
|
|
del generated_pixels |
|
|
|
|
|
print(translate("新規生成データとテンソルデータのフレームを結合しました。")) |
|
|
|
|
|
print("combined_pixels: ", combined_pixels.shape) |
|
|
|
|
|
if combined_pixels is not None: |
|
|
|
|
|
print(translate("[DEBUG] 最終結合結果:")) |
|
|
print(translate(" - 形状: {0}").format(combined_pixels.shape)) |
|
|
print(translate(" - 型: {0}").format(combined_pixels.dtype)) |
|
|
print(translate(" - デバイス: {0}").format(combined_pixels.device)) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate("結合した動画をMP4に変換中..."), |
|
|
make_progress_bar_html(95, translate("最終MP4変換処理")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
combined_output_filename = os.path.join( |
|
|
outputs_folder, f"{job_id}_combined.mp4" |
|
|
) |
|
|
|
|
|
|
|
|
save_bcthw_as_mp4( |
|
|
combined_pixels, |
|
|
combined_output_filename, |
|
|
fps=30, |
|
|
crf=mp4_crf, |
|
|
) |
|
|
print( |
|
|
translate("最終結果を保存しました: {0}").format( |
|
|
combined_output_filename |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate("結合動画の保存場所: {0}").format( |
|
|
os.path.abspath(combined_output_filename) |
|
|
) |
|
|
) |
|
|
print(translate("中間ファイルの削除を開始します...")) |
|
|
deleted_files = [] |
|
|
try: |
|
|
import re |
|
|
|
|
|
interim_pattern = re.compile(rf"{job_id}_combined_interim_\d+\.mp4") |
|
|
deleted_count = 0 |
|
|
|
|
|
for filename in os.listdir(outputs_folder): |
|
|
if interim_pattern.match(filename): |
|
|
interim_path = os.path.join(outputs_folder, filename) |
|
|
try: |
|
|
os.remove(interim_path) |
|
|
deleted_files.append(filename) |
|
|
deleted_count += 1 |
|
|
print( |
|
|
translate( |
|
|
" - 中間ファイルを削除しました: {0}" |
|
|
).format(filename) |
|
|
) |
|
|
except Exception as e: |
|
|
print( |
|
|
translate( |
|
|
" - ファイル削除エラー ({0}): {1}" |
|
|
).format(filename, str(e)) |
|
|
) |
|
|
|
|
|
if deleted_count > 0: |
|
|
print( |
|
|
translate("中間ファイル {0} 個を削除しました").format( |
|
|
deleted_count |
|
|
) |
|
|
) |
|
|
files_str = ", ".join(deleted_files) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate("中間ファイルを削除しました: {0}").format( |
|
|
files_str |
|
|
), |
|
|
make_progress_bar_html( |
|
|
97, translate("クリーンアップ完了") |
|
|
), |
|
|
), |
|
|
) |
|
|
) |
|
|
else: |
|
|
print(translate("削除対象の中間ファイルは見つかりませんでした")) |
|
|
except Exception as e: |
|
|
print( |
|
|
translate( |
|
|
"中間ファイル削除中にエラーが発生しました: {0}" |
|
|
).format(e) |
|
|
) |
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
|
|
|
|
|
|
stream.output_queue.push(("file", combined_output_filename)) |
|
|
|
|
|
|
|
|
combined_frames = combined_pixels.shape[2] |
|
|
combined_size_mb = ( |
|
|
combined_pixels.element_size() * combined_pixels.nelement() |
|
|
) / (1024 * 1024) |
|
|
print( |
|
|
translate( |
|
|
"結合完了情報: テンソルデータ({0}フレーム) + 新規動画({1}フレーム) = 合計{2}フレーム" |
|
|
).format(uploaded_frames, generated_frames, combined_frames) |
|
|
) |
|
|
print( |
|
|
translate("結合動画の再生時間: {0:.2f}秒").format( |
|
|
combined_frames / 30 |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate("データサイズ: {0:.2f} MB(制限無し)").format( |
|
|
combined_size_mb |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"テンソルデータ({0}フレーム)と動画({1}フレーム)の結合が完了しました。\n合計フレーム数: {2}フレーム ({3:.2f}秒) - サイズ制限なし" |
|
|
).format( |
|
|
uploaded_frames, |
|
|
generated_frames, |
|
|
combined_frames, |
|
|
combined_frames / 30, |
|
|
), |
|
|
make_progress_bar_html(100, translate("結合完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
else: |
|
|
print(translate("テンソルデータの結合に失敗しました。")) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate("テンソルデータの結合に失敗しました。"), |
|
|
make_progress_bar_html(100, translate("エラー")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
stream.output_queue.push(("file", combined_output_filename)) |
|
|
|
|
|
|
|
|
combined_frames = combined_pixels.shape[2] |
|
|
combined_size_mb = ( |
|
|
combined_pixels.element_size() * combined_pixels.nelement() |
|
|
) / (1024 * 1024) |
|
|
print( |
|
|
translate( |
|
|
"結合完了情報: テンソルデータ({0}フレーム) + 新規動画({1}フレーム) = 合計{2}フレーム" |
|
|
).format(uploaded_frames, generated_frames, combined_frames) |
|
|
) |
|
|
print( |
|
|
translate("結合動画の再生時間: {0:.2f}秒").format(combined_frames / 30) |
|
|
) |
|
|
print( |
|
|
translate("データサイズ: {0:.2f} MB(制限無し)").format( |
|
|
combined_size_mb |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"テンソルデータ({0}フレーム)と動画({1}フレーム)の結合が完了しました。\n合計フレーム数: {2}フレーム ({3:.2f}秒)" |
|
|
).format( |
|
|
uploaded_frames, |
|
|
generated_frames, |
|
|
combined_frames, |
|
|
combined_frames / 30, |
|
|
), |
|
|
make_progress_bar_html(100, translate("結合完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
except Exception as e: |
|
|
print( |
|
|
translate("テンソルデータ結合中にエラーが発生しました: {0}").format(e) |
|
|
) |
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"エラー: テンソルデータ結合に失敗しました - {0}" |
|
|
).format(str(e)), |
|
|
make_progress_bar_html(100, translate("エラー")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if HAS_WINSOUND: |
|
|
winsound.PlaySound("SystemExclamation", winsound.SND_ALIAS) |
|
|
else: |
|
|
print(translate("\n✓ 処理が完了しました!")) |
|
|
|
|
|
|
|
|
if torch.cuda.is_available(): |
|
|
torch.cuda.synchronize() |
|
|
torch.cuda.empty_cache() |
|
|
import gc |
|
|
|
|
|
gc.collect() |
|
|
print( |
|
|
translate( |
|
|
"[MEMORY] 処理完了後のメモリクリア: {memory:.2f}GB/{total_memory:.2f}GB" |
|
|
).format( |
|
|
memory=torch.cuda.memory_allocated() / 1024**3, |
|
|
total_memory=torch.cuda.get_device_properties(0).total_memory |
|
|
/ 1024**3, |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
print( |
|
|
translate("[DEBUG] テンソルデータ保存フラグの値: {0}").format( |
|
|
save_tensor_data |
|
|
) |
|
|
) |
|
|
if save_tensor_data: |
|
|
try: |
|
|
|
|
|
combined_latents = torch.cat( |
|
|
[generated_latents, uploaded_tensor_edit_latents], dim=2 |
|
|
) |
|
|
|
|
|
|
|
|
tensor_file_path = os.path.join(outputs_folder, f"{job_id}.safetensors") |
|
|
|
|
|
|
|
|
print(translate("=== テンソルデータ保存処理開始 ===")) |
|
|
print( |
|
|
translate("保存対象フレーム数: {frames}").format( |
|
|
frames=combined_latents.shape[2] |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
combined_latents = combined_latents.cpu() |
|
|
|
|
|
|
|
|
tensor_size_mb = ( |
|
|
combined_latents.element_size() * combined_latents.nelement() |
|
|
) / (1024 * 1024) |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"テンソルデータを保存中... shape: {shape}, フレーム数: {frames}, サイズ: {size:.2f} MB" |
|
|
).format( |
|
|
shape=combined_latents.shape, |
|
|
frames=combined_latents.shape[2], |
|
|
size=tensor_size_mb, |
|
|
) |
|
|
) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"テンソルデータを保存中... ({frames}フレーム)" |
|
|
).format(frames=combined_latents.shape[2]), |
|
|
make_progress_bar_html( |
|
|
95, translate("テンソルデータの保存") |
|
|
), |
|
|
), |
|
|
) |
|
|
) |
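
                # safetensors stores tensors only, so sidecar info (height,
                # width, frame count) rides along as a small int32 tensor.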
|
|
|
|
|
|
|
|
metadata = torch.tensor( |
|
|
[height, width, combined_latents.shape[2]], dtype=torch.int32 |
|
|
) |
|
|
|
|
|
|
|
|
tensor_dict = { |
|
|
"history_latents": combined_latents, |
|
|
"metadata": metadata, |
|
|
} |
|
|
sf.save_file(tensor_dict, tensor_file_path) |
|
|
|
|
|
print( |
|
|
translate("テンソルデータを保存しました: {path}").format( |
|
|
path=tensor_file_path |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"保存済みテンソルデータ情報: {frames}フレーム, {size:.2f} MB" |
|
|
).format(frames=combined_latents.shape[2], size=tensor_size_mb) |
|
|
) |
|
|
print(translate("=== テンソルデータ保存処理完了 ===")) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate( |
|
|
"テンソルデータが保存されました: {path} ({frames}フレーム, {size:.2f} MB)" |
|
|
).format( |
|
|
path=os.path.basename(tensor_file_path), |
|
|
frames=combined_latents.shape[2], |
|
|
size=tensor_size_mb, |
|
|
), |
|
|
make_progress_bar_html(100, translate("処理完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
except Exception as e: |
|
|
print(translate("テンソルデータ保存エラー: {0}").format(e)) |
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
translate("テンソルデータの保存中にエラーが発生しました。"), |
|
|
make_progress_bar_html(100, translate("処理完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
process_end_time = time.time() |
|
|
total_process_time = process_end_time - process_start_time |
|
|
hours, remainder = divmod(total_process_time, 3600) |
|
|
minutes, seconds = divmod(remainder, 60) |
|
|
time_str = "" |
|
|
if hours > 0: |
|
|
time_str = translate("{0}時間 {1}分 {2}秒").format( |
|
|
int(hours), int(minutes), f"{seconds:.1f}" |
|
|
) |
|
|
elif minutes > 0: |
|
|
time_str = translate("{0}分 {1}秒").format(int(minutes), f"{seconds:.1f}") |
|
|
else: |
|
|
time_str = translate("{0:.1f}秒").format(seconds) |
|
|
print(translate("\n全体の処理時間: {0}").format(time_str)) |
|
|
|
|
|
|
|
|
|
|
|
print(combined_output_filename) |
|
|
combined_filename_only = os.path.basename(combined_output_filename) |
|
|
completion_message = translate( |
|
|
"テンソルデータとの結合が完了しました。結合ファイル名: {filename}\n全体の処理時間: {time}" |
|
|
).format(filename=combined_filename_only, time=time_str) |
|
|
|
|
|
output_filename = combined_output_filename |
|
|
|
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
completion_message, |
|
|
make_progress_bar_html(100, translate("処理完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if not keep_section_videos: |
|
|
|
|
|
final_video_path = output_filename |
|
|
final_video_name = os.path.basename(final_video_path) |
|
|
|
|
|
job_id_part = job_id |
|
|
|
|
|
|
|
|
files = os.listdir(outputs_folder) |
|
|
deleted_count = 0 |
|
|
|
|
|
for file in files: |
|
|
|
|
|
|
|
|
if ( |
|
|
file.startswith(job_id_part) |
|
|
and file.endswith(".mp4") |
|
|
and file != final_video_name |
|
|
and "combined" not in file |
|
|
): |
|
|
file_path = os.path.join(outputs_folder, file) |
|
|
try: |
|
|
os.remove(file_path) |
|
|
deleted_count += 1 |
|
|
print(translate("[削除] 中間ファイル: {0}").format(file)) |
|
|
except Exception as e: |
|
|
print( |
|
|
translate( |
|
|
"[エラー] ファイル削除時のエラー {0}: {1}" |
|
|
).format(file, e) |
|
|
) |
|
|
|
|
|
if deleted_count > 0: |
|
|
print( |
|
|
translate( |
|
|
"[済] {0}個の中間ファイルを削除しました。最終ファイルは保存されています: {1}" |
|
|
).format(deleted_count, final_video_name) |
|
|
) |
|
|
final_message = translate( |
|
|
"中間ファイルを削除しました。最終動画と結合動画は保存されています。" |
|
|
) |
|
|
stream.output_queue.push( |
|
|
( |
|
|
"progress", |
|
|
( |
|
|
None, |
|
|
final_message, |
|
|
make_progress_bar_html(100, translate("処理完了")), |
|
|
), |
|
|
) |
|
|
) |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models() |
|
|
except: |
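        # Broad on purpose: this also catches the KeyboardInterrupt raised by
        # callback_generation when the user cancels the job.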
|
|
import traceback |
|
|
|
|
|
traceback.print_exc() |
|
|
|
|
|
if not high_vram: |
|
|
unload_complete_models( |
|
|
text_encoder, text_encoder_2, image_encoder, vae, transformer |
|
|
) |
|
|
|
|
|
stream.output_queue.push(("end", None)) |
|
|
return |
|
|
|
|
|
|
|
|
|
|
|
def validate_images(input_image, length_radio=None, frame_size_radio=None): |
|
|
"""入力画像が有効かを確認する""" |
|
|
|
|
|
if input_image is not None: |
|
|
return True, "" |
|
|
|
|
|
|
|
|
error_html = f""" |
|
|
<div style="padding: 15px; border-radius: 10px; background-color: #ffebee; border: 1px solid #f44336; margin: 10px 0;"> |
|
|
<h3 style="color: #d32f2f; margin: 0 0 10px 0;">{translate("❗️ 画像が選択されていません")}</h3> |
|
|
<p>{translate("生成を開始する前に「Image」欄に画像をアップロードしてください。これはあまねく叡智の始発点となる重要な画像です。")}</p> |
|
|
</div> |
|
|
""" |
|
|
error_bar = make_progress_bar_html(100, translate("画像がありません")) |
|
|
return False, error_html + error_bar |
|
|
|
|
|
|
|
|
def process( |
|
|
input_image, |
|
|
prompt, |
|
|
seed, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_preservation, |
|
|
use_teacache, |
|
|
mp4_crf=16, |
|
|
end_frame_strength=1.0, |
|
|
keep_section_videos=False, |
|
|
lora_files=None, |
|
|
lora_files2=None, |
|
|
lora_files3=None, |
|
|
lora_scales_text="0.8,0.8,0.8", |
|
|
output_dir=None, |
|
|
save_intermediate_frames=False, |
|
|
use_lora=False, |
|
|
lora_mode=None, |
|
|
lora_dropdown1=None, |
|
|
lora_dropdown2=None, |
|
|
lora_dropdown3=None, |
|
|
save_tensor_data=False, |
|
|
tensor_data_input=None, |
|
|
trim_start_latent_size=0, |
|
|
generation_latent_size=0, |
|
|
combine_mode=COMBINE_MODE_DEFAULT, |
|
|
fp8_optimization=False, |
|
|
batch_count=1, |
|
|
use_vae_cache=False, |
|
|
): |
|
|
|
|
|
print( |
|
|
f"process関数開始時のVAEキャッシュ設定: {use_vae_cache}, 型: {type(use_vae_cache)}" |
|
|
) |
|
|
global stream |
|
|
global batch_stopped |
|
|
global queue_enabled |
|
|
global queue_type |
|
|
global prompt_queue_file_path |
|
|
global vae_cache_enabled |
|
|
global image_queue_files |
|
|
|
|
|
|
|
|
batch_stopped = False |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
batch_count_val = int(batch_count) |
|
|
batch_count = max(1, min(batch_count_val, 100)) |
|
|
except (ValueError, TypeError): |
|
|
print( |
|
|
translate( |
|
|
"[WARN] バッチ処理回数が無効です。デフォルト値の1を使用します: {0}" |
|
|
).format(batch_count) |
|
|
) |
|
|
batch_count = 1 |
|
|
|
|
|
print(translate("\u25c6 バッチ処理回数: {0}回").format(batch_count)) |
|
|
|
|
|
|
|
|
|
|
|
from diffusers_helper.bucket_tools import SAFE_RESOLUTIONS |
|
|
|
|
|
|
|
|
resolution = 640 |
|
|
|
|
|
|
|
|
print(translate("解像度を設定: {0}").format(resolution)) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
print(translate("\n==== 動画生成開始 =====")) |
|
|
print(translate("\u25c6 サンプリングステップ数: {0}").format(steps)) |
|
|
print(translate("\u25c6 TeaCache使用: {0}").format(use_teacache)) |
|
|
|
|
|
|
|
|
print(translate("\u25c6 初期SEED値: {0}").format(seed)) |
|
|
print(translate("\u25c6 LoRA使用: {0}").format(use_lora)) |
|
|
|
|
|
|
|
|
print(translate("\u25c6 FP8最適化: {0}").format(fp8_optimization)) |
|
|
|
|
|
|
|
|
print(translate("\u25c6 VAEキャッシュ: {0}").format(use_vae_cache)) |
|
|
print( |
|
|
f"VAEキャッシュ詳細状態: use_vae_cache={use_vae_cache}, type={type(use_vae_cache)}" |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
if lora_mode == translate("ディレクトリから選択") and has_lora_support: |
|
|
|
|
|
has_selected_lora = False |
|
|
for dropdown in [lora_dropdown1, lora_dropdown2, lora_dropdown3]: |
|
|
dropdown_value = dropdown.value if hasattr(dropdown, "value") else dropdown |
|
|
if dropdown_value and dropdown_value != translate("なし"): |
|
|
has_selected_lora = True |
|
|
break |
|
|
|
|
|
|
|
|
if has_selected_lora: |
|
|
use_lora = True |
|
|
print( |
|
|
translate( |
|
|
"[INFO] ディレクトリでLoRAが選択されているため、LoRA使用を有効にしました" |
|
|
) |
|
|
) |
|
|
|
|
|
if use_lora and has_lora_support: |
|
|
all_lora_files = [] |
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lora") |
|
|
|
|
|
|
|
|
lora_mode_value = lora_mode.value if hasattr(lora_mode, "value") else lora_mode |
|
|
if lora_mode_value and lora_mode_value == translate("ディレクトリから選択"): |
|
|
|
|
|
print(translate("[INFO] LoRA読み込み方式: ディレクトリから選択")) |
|
|
|
|
|
|
|
|
selected_lora_names = [] |
|
|
|
|
|
|
|
|
for dropdown, dropdown_name in [ |
|
|
(lora_dropdown1, "LoRA1"), |
|
|
(lora_dropdown2, "LoRA2"), |
|
|
(lora_dropdown3, "LoRA3"), |
|
|
]: |
|
|
|
|
|
dropdown_value = ( |
|
|
dropdown.value if hasattr(dropdown, "value") else dropdown |
|
|
) |
|
|
|
|
|
|
|
|
                # Gradio can hand back a numeric 0 for an empty dropdown;
                # 0.0 == 0, so these two checks cover all three cases.
                if dropdown_value in (0, "0"):
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 情報表示: {name}の値が数値0として検出されました。'なし'として扱います" |
|
|
).format(name=dropdown_name) |
|
|
) |
|
|
dropdown_value = translate("なし") |
|
|
|
|
|
|
|
|
if not isinstance(dropdown_value, str) and dropdown_value is not None: |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 情報表示: {name}の値のタイプ変換が必要: {type}" |
|
|
).format(name=dropdown_name, type=type(dropdown_value).__name__) |
|
|
) |
|
|
dropdown_value = str(dropdown_value) |
|
|
|
|
|
if dropdown_value and dropdown_value != translate("なし"): |
|
|
lora_path = os.path.join(lora_dir, dropdown_value) |
|
|
|
|
|
model_name = f"LoRA{dropdown_name[-1]}: {dropdown_value}" |
|
|
selected_lora_names.append(model_name) |
|
|
|
|
|
|
|
|
if selected_lora_names: |
|
|
print( |
|
|
translate("[INFO] 選択されたLoRAモデル: {0}").format( |
|
|
", ".join(selected_lora_names) |
|
|
) |
|
|
) |
|
|
else: |
|
|
print(translate("[INFO] 有効なLoRAモデルが選択されていません")) |
|
|
else: |
|
|
|
|
|
|
|
|
if lora_files is not None: |
|
|
|
|
|
print(f"[DEBUG] 情報表示: lora_filesの型: {type(lora_files)}") |
|
|
|
|
|
if isinstance(lora_files, list): |
|
|
|
|
|
for file in lora_files: |
|
|
if file is not None: |
|
|
all_lora_files.append(file) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル1のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
                else:  # a single file object rather than a list
|
|
all_lora_files.append(lora_files) |
|
|
|
|
|
|
|
|
if lora_files2 is not None: |
|
|
|
|
|
print(f"[DEBUG] 情報表示: lora_files2の型: {type(lora_files2)}") |
|
|
|
|
|
if isinstance(lora_files2, list): |
|
|
|
|
|
for file in lora_files2: |
|
|
if file is not None: |
|
|
all_lora_files.append(file) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル2のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
                else:  # a single file object rather than a list
|
|
all_lora_files.append(lora_files2) |
|
|
|
|
|
|
|
|
if lora_files3 is not None: |
|
|
|
|
|
print(f"[DEBUG] 情報表示: lora_files3の型: {type(lora_files3)}") |
|
|
|
|
|
if isinstance(lora_files3, list): |
|
|
|
|
|
for file in lora_files3: |
|
|
if file is not None: |
|
|
all_lora_files.append(file) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"[WARN] LoRAファイル3のリスト内に無効なファイルがあります" |
|
|
) |
|
|
) |
|
|
                else:  # a single file object rather than a list
|
|
all_lora_files.append(lora_files3) |
|
|
|
|
|
|
|
|
        try:
            scales = [float(s.strip()) for s in lora_scales_text.split(",")]
        except (ValueError, AttributeError):
            # Fall back to the default strength when the text cannot be parsed
            scales = [0.8] * len(all_lora_files)
|
|
|
|
|
|
|
|
if len(scales) < len(all_lora_files): |
|
|
scales.extend([0.8] * (len(all_lora_files) - len(scales))) |
|
|
elif len(scales) > len(all_lora_files): |
|
|
scales = scales[: len(all_lora_files)] |
|
|
|
|
|
|
|
|
if len(all_lora_files) == 1: |
|
|
|
|
|
print( |
|
|
translate("\u25c6 LoRAファイル: {0}").format( |
|
|
os.path.basename(all_lora_files[0].name) |
|
|
) |
|
|
) |
|
|
print(translate("\u25c6 LoRA適用強度: {0}").format(scales[0])) |
|
|
elif len(all_lora_files) > 1: |
|
|
|
|
|
print(translate("\u25c6 LoRAファイル (複数):")) |
|
|
for i, file in enumerate(all_lora_files): |
|
|
print(f" - {os.path.basename(file.name)} (スケール: {scales[i]})") |
|
|
else: |
|
|
|
|
|
print(translate("\u25c6 LoRA: 使用しない")) |
|
|
|
|
|
print("=============================\n") |
|
|
|
|
|
|
|
|
batch_stopped = False |
|
|
|
|
|
|
|
|
original_seed = seed |
|
|
|
|
|
|
|
|
    print(
        f"[DEBUG] use_random_seed: {use_random_seed}, type: {type(use_random_seed)}"
    )

    # use_random_seed is the module-level checkbox component rather than a
    # function argument, so unwrap its .value first; plain bool/str inputs
    # pass through the getattr fallback unchanged.
    use_random = False
    use_random_seed_value = getattr(use_random_seed, "value", use_random_seed)
    if isinstance(use_random_seed_value, bool):
        use_random = use_random_seed_value
    elif isinstance(use_random_seed_value, str):
        use_random = use_random_seed_value.lower() in ["true", "yes", "1", "on"]

    print(f"[DEBUG] effective random-seed flag: {use_random}")
|
|
|
|
|
if use_random: |
|
|
|
|
|
previous_seed = seed |
|
|
|
|
|
seed = random.randint(0, 2**32 - 1) |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"\n[INFO] ランダムシード機能が有効なため、指定されたSEED値 {0} の代わりに新しいSEED値 {1} を使用します。" |
|
|
).format(previous_seed, seed) |
|
|
) |
|
|
|
|
|
yield ( |
|
|
None, |
|
|
None, |
|
|
"", |
|
|
"", |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(value=seed), |
|
|
) |
|
|
|
|
|
original_seed = seed |
|
|
else: |
|
|
print(translate("[INFO] 指定されたSEED値 {0} を使用します。").format(seed)) |
|
|
yield ( |
|
|
None, |
|
|
None, |
|
|
"", |
|
|
"", |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
|
|
|
stream = AsyncStream() |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("\nバッチ処理が中断されました(バッチ開始前)")) |
|
|
yield ( |
|
|
None, |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中断されました"), |
|
|
"", |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update(), |
|
|
) |
|
|
return |
|
|
|
|
|
|
|
|
for batch_index in range(batch_count): |
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("\nバッチ処理がユーザーによって中止されました")) |
|
|
yield ( |
|
|
None, |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中止されました。"), |
|
|
"", |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update(), |
|
|
) |
|
|
break |
|
|
|
|
|
|
|
|
if batch_count > 1: |
|
|
batch_info = translate("バッチ処理: {0}/{1}").format( |
|
|
batch_index + 1, batch_count |
|
|
) |
|
|
|
|
|
print(f"\n{batch_info}") |
|
|
|
|
|
yield ( |
|
|
None, |
|
|
gr.update(visible=False), |
|
|
batch_info, |
|
|
"", |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
        # Re-evaluate the random-seed flag each iteration, unwrapping the
        # checkbox component's .value as above.
        use_random = False
        use_random_seed_value = getattr(use_random_seed, "value", use_random_seed)
        if isinstance(use_random_seed_value, bool):
            use_random = use_random_seed_value
        elif isinstance(use_random_seed_value, str):
            use_random = use_random_seed_value.lower() in ["true", "yes", "1", "on"]
|
|
|
|
|
|
|
|
if batch_count > 1: |
|
|
|
|
|
if not use_random: |
|
|
prev_seed = seed |
|
|
current_seed = original_seed + batch_index |
|
|
|
|
|
seed = current_seed |
|
|
if batch_index > 0: |
|
|
print( |
|
|
translate( |
|
|
"[INFO] バッチ {0}/{1} の処理を開始: SEED値を {2} に設定しました。" |
|
|
).format(batch_index + 1, batch_count, seed) |
|
|
) |
|
|
else: |
|
|
|
|
|
if batch_index > 0: |
|
|
prev_seed = seed |
|
|
seed = random.randint(0, 2**32 - 1) |
|
|
print( |
|
|
translate( |
|
|
"[INFO] バッチ {0}/{1} の処理を開始: 新しいランダムSEED値 {2} を生成しました。" |
|
|
).format(batch_index + 1, batch_count, seed) |
|
|
) |
|
|
|
|
|
|
|
|
print(translate("現在のSEED値: {0}").format(seed)) |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print( |
|
|
translate( |
|
|
"バッチ処理が中断されました。worker関数の実行をキャンセルします。" |
|
|
) |
|
|
) |
|
|
|
|
|
yield ( |
|
|
None, |
|
|
gr.update(visible=False), |
|
|
translate("バッチ処理が中断されました({0}/{1})").format( |
|
|
batch_index, batch_count |
|
|
), |
|
|
"", |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update(), |
|
|
) |
|
|
break |
|
|
|
|
|
|
|
|
gpu_memory_value = ( |
|
|
float(gpu_memory_preservation) |
|
|
if gpu_memory_preservation is not None |
|
|
else 6.0 |
|
|
) |
|
|
print( |
|
|
translate("Using GPU memory preservation setting: {0} GB").format( |
|
|
gpu_memory_value |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if not output_dir or not output_dir.strip(): |
|
|
output_dir = "outputs" |
|
|
print(translate("Output directory: {0}").format(output_dir)) |
|
|
|
|
|
|
|
|
if input_image is not None: |
|
|
if isinstance(input_image, str): |
|
|
print( |
|
|
translate("[DEBUG] input_image path: {0}, type: {1}").format( |
|
|
input_image, type(input_image) |
|
|
) |
|
|
) |
|
|
else: |
|
|
print( |
|
|
translate("[DEBUG] input_image shape: {0}, type: {1}").format( |
|
|
input_image.shape, type(input_image) |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
print( |
|
|
translate("[DEBUG] async_run直前のsave_tensor_data: {0}").format( |
|
|
save_tensor_data |
|
|
) |
|
|
) |
|
|
print(translate("[DEBUG] async_run直前のLoRA関連パラメータ:")) |
|
|
print( |
|
|
translate(" - lora_mode: {0}, 型: {1}").format( |
|
|
lora_mode, type(lora_mode).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - lora_dropdown1: {0!r}, 型: {1}").format( |
|
|
lora_dropdown1, type(lora_dropdown1).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - lora_dropdown2: {0!r}, 型: {1}").format( |
|
|
lora_dropdown2, type(lora_dropdown2).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - lora_dropdown3: {0!r}, 型: {1}").format( |
|
|
lora_dropdown3, type(lora_dropdown3).__name__ |
|
|
) |
|
|
) |
|
|
print( |
|
|
translate(" - use_lora: {0}, 型: {1}").format( |
|
|
use_lora, type(use_lora).__name__ |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
if lora_mode == translate("ディレクトリから選択") and lora_dropdown2 == 0: |
|
|
print(translate("[DEBUG] lora_dropdown2が数値0になっています")) |
|
|
|
|
|
|
|
|
lora_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)), "lora") |
|
|
if os.path.exists(lora_dir): |
|
|
print(translate("[DEBUG] LoRAディレクトリ内ファイル:")) |
|
|
for filename in os.listdir(lora_dir): |
|
|
if filename.endswith((".safetensors", ".pt", ".bin")): |
|
|
print(f" - {filename}") |
|
|
|
|
|
|
|
|
current_input_image = input_image |
|
|
|
|
|
|
|
|
if isinstance(current_input_image, str): |
|
|
print(translate("処理用入力画像: {0}").format(current_input_image)) |
|
|
|
|
|
async_run( |
|
|
worker, |
|
|
current_input_image, |
|
|
prompt, |
|
|
seed, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_value, |
|
|
use_teacache, |
|
|
mp4_crf, |
|
|
end_frame_strength, |
|
|
keep_section_videos, |
|
|
lora_files, |
|
|
lora_files2, |
|
|
lora_files3, |
|
|
lora_scales_text, |
|
|
output_dir, |
|
|
save_intermediate_frames, |
|
|
use_lora, |
|
|
lora_mode, |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
save_tensor_data, |
|
|
tensor_data_input, |
|
|
trim_start_latent_size, |
|
|
generation_latent_size, |
|
|
combine_mode, |
|
|
fp8_optimization, |
|
|
batch_index, |
|
|
use_vae_cache, |
|
|
) |
|
|
|
|
|
|
|
|
batch_output_filename = None |
|
|
|
|
|
|
|
|
while True: |
|
|
flag, data = stream.output_queue.next() |
|
|
|
|
|
if flag == "file": |
|
|
batch_output_filename = data |
|
|
|
|
|
yield ( |
|
|
batch_output_filename, |
|
|
gr.update(value=None, visible=False), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
|
|
|
if flag == "progress": |
|
|
preview, desc, html = data |
|
|
|
|
|
|
|
|
batch_info = "" |
|
|
if batch_count > 1: |
|
|
batch_info = translate("バッチ処理: {0}/{1} - ").format( |
|
|
batch_index + 1, batch_count |
|
|
) |
|
|
|
|
|
|
|
|
current_seed_info = translate("現在のSEED値: {0}").format(seed) |
|
|
if batch_info: |
|
|
desc = batch_info + desc |
|
|
|
|
|
|
|
|
if current_seed_info not in desc: |
|
|
desc = desc + "\n\n" + current_seed_info |
|
|
|
|
|
yield ( |
|
|
gr.update(), |
|
|
gr.update(visible=True, value=preview), |
|
|
desc, |
|
|
html, |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
|
|
|
if flag == "end": |
|
|
|
|
|
if batch_index == batch_count - 1 or batch_stopped: |
|
|
|
|
|
completion_message = "" |
|
|
if batch_stopped: |
|
|
completion_message = translate( |
|
|
"バッチ処理が中止されました({0}/{1})" |
|
|
).format(batch_index + 1, batch_count) |
|
|
else: |
|
|
completion_message = translate( |
|
|
"バッチ処理が完了しました({0}/{1})" |
|
|
).format(batch_count, batch_count) |
|
|
yield ( |
|
|
batch_output_filename, |
|
|
gr.update(value=None, visible=False), |
|
|
completion_message, |
|
|
"", |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False, value=translate("End Generation")), |
|
|
gr.update(), |
|
|
) |
|
|
else: |
|
|
|
|
|
next_batch_message = translate( |
|
|
"バッチ処理: {0}/{1} 完了、次のバッチに進みます..." |
|
|
).format(batch_index + 1, batch_count) |
|
|
yield ( |
|
|
batch_output_filename, |
|
|
gr.update(value=None, visible=False), |
|
|
next_batch_message, |
|
|
"", |
|
|
gr.update(interactive=False), |
|
|
gr.update(interactive=True), |
|
|
gr.update(), |
|
|
) |
|
|
break |
|
|
|
|
|
|
|
|
output_filename = batch_output_filename |
|
|
|
|
|
|
|
|
if batch_stopped: |
|
|
print(translate("バッチ処理ループを中断します")) |
|
|
break |
|
|
|
|
|
|
|
|
def end_process(): |
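    """Stop handler: flag the batch loop to stop and ask the worker to end."""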
|
|
global stream |
|
|
global batch_stopped |
|
|
|
|
|
|
|
|
batch_stopped = True |
|
|
print(translate("\n停止ボタンが押されました。バッチ処理を停止します...")) |
|
|
|
|
|
stream.input_queue.push("end") |
|
|
|
|
|
|
|
|
return gr.update(value=translate("停止処理中...")) |
|
|
|
|
|
|
|
|
|
|
|
quick_prompts = [ |
|
|
"A character doing some simple body movements.", |
|
|
"A character uses expressive hand gestures and body language.", |
|
|
"A character walks leisurely with relaxed movements.", |
|
|
"A character performs dynamic movements with energy and flowing motion.", |
|
|
"A character moves in unexpected ways, with surprising transitions poses.", |
|
|
] |
|
|
quick_prompts = [[x] for x in quick_prompts] |
|
|
|
|
|
css = get_app_css() |
|
|
block = gr.Blocks(css=css).queue() |
|
|
with block: |
|
|
gr.HTML('<h1>FramePack<span class="title-suffix">-eichi</span> tensor tool</h1>') |
|
|
|
|
|
with gr.Tab("Generation"): |
|
|
gr.Markdown( |
|
|
translate( |
|
|
"**eichi等で生成した(history_latentsを持つ)テンソルデータを起点とした生成ツールです。テンソルデータの後ろに生成した動画を継ぎ足します。\n注意点:F1モデルを使用していないため品質に課題が出る可能性があります(逆流、ムーンウォーク現象等)。**" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
with gr.Column(): |
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("入力:テンソルデータ"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
tensor_data_input = gr.File( |
|
|
label=translate( |
|
|
"テンソルデータアップロード (.safetensors) - eichiにて生成したファイル。生成動画の前方(先頭)または後方(末尾)に結合されます" |
|
|
), |
|
|
file_types=[".safetensors"], |
|
|
type="filepath", |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
preview_tensor_desc = gr.Markdown( |
|
|
"", elem_classes="markdown-desc" |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("入力:動画生成で使用する画像、プロンプト"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
generation_latent_size = gr.Slider( |
|
|
label=translate("生成フレーム数"), |
|
|
minimum=1, |
|
|
maximum=12, |
|
|
value=9, |
|
|
step=1, |
|
|
interactive=True, |
|
|
info=translate( |
|
|
"新規で生成するフレーム数。6-9推奨。\n設定値と時間の目安:3(0.3秒)、6(0.7秒)、9(1.1秒)、12(1.5秒)" |
|
|
), |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
input_image_state = gr.State( |
|
|
None |
|
|
) |
|
|
|
|
|
|
|
|
input_image = gr.Image( |
|
|
label=translate( |
|
|
"Image - テンソルデータの前方(先頭)または後方(末尾)の動画を生成するための方向性を示す画像" |
|
|
), |
|
|
sources=["upload", "clipboard"], |
|
|
type="filepath", |
|
|
height=320, |
|
|
) |
|
|
|
|
|
|
|
|
def update_input_image_state(image_path): |
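                            """Mirror the uploaded image path into input_image_state."""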
|
|
|
|
|
|
|
|
return image_path |
|
|
|
|
|
|
|
|
input_image.change( |
|
|
fn=update_input_image_state, |
|
|
inputs=[input_image], |
|
|
outputs=[input_image_state], |
|
|
) |
|
|
|
|
|
|
|
|
def update_from_image_metadata(image_path, copy_enabled=False): |
|
|
"""Imageアップロード時にメタデータを抽出してUIに反映する |
|
|
copy_enabled: メタデータの複写が有効化されているかどうか |
|
|
""" |
|
|
|
|
|
if not copy_enabled: |
|
|
|
|
|
return [gr.update()] * 2 |
|
|
|
|
|
if image_path is None: |
|
|
|
|
|
return [gr.update()] * 2 |
|
|
|
|
|
|
|
|
|
|
|
try: |
|
|
|
|
|
|
|
|
metadata = extract_metadata_from_png(image_path) |
|
|
|
|
|
if not metadata: |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"アップロードされた画像にメタデータが含まれていません" |
|
|
) |
|
|
) |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"画像からメタデータを抽出しました: {0}" |
|
|
).format(metadata) |
|
|
) |
|
|
|
|
|
|
|
|
prompt_update = gr.update() |
|
|
seed_update = gr.update() |
|
|
|
|
|
if PROMPT_KEY in metadata and metadata[PROMPT_KEY]: |
|
|
prompt_update = gr.update( |
|
|
value=metadata[PROMPT_KEY] |
|
|
) |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"プロンプトを画像から取得: {0}" |
|
|
).format(metadata[PROMPT_KEY]) |
|
|
) |
|
|
|
|
|
if SEED_KEY in metadata and metadata[SEED_KEY]: |
|
|
|
|
|
try: |
|
|
seed_value = int(metadata[SEED_KEY]) |
|
|
seed_update = gr.update(value=seed_value) |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"SEED値を画像から取得: {0}" |
|
|
).format(seed_value) |
|
|
) |
|
|
except (ValueError, TypeError): |
|
|
|
|
|
print( |
|
|
translate("SEED値の変換エラー: {0}").format( |
|
|
metadata[SEED_KEY] |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
return [prompt_update, seed_update] |
|
|
except Exception as e: |
|
|
|
|
|
|
|
|
print(translate("メタデータ抽出エラー: {0}").format(e)) |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
|
|
|
|
|
|
with gr.Row(variant="compact"): |
|
|
|
|
|
|
|
|
global copy_metadata |
|
|
copy_metadata = gr.Checkbox( |
|
|
label=translate("埋め込みプロンプトおよびシードを複写する"), |
|
|
value=False, |
|
|
info=translate( |
|
|
"チェックをオンにすると、画像のメタデータからプロンプトとシードを自動的に取得します" |
|
|
), |
|
|
visible=False, |
|
|
) |
|
|
|
|
|
|
|
|
copy_metadata_visible = gr.Checkbox( |
|
|
label=translate("埋め込みプロンプトおよびシードを複写する"), |
|
|
value=False, |
|
|
info=translate( |
|
|
"チェックをオンにすると、画像のメタデータからプロンプトとシードを自動的に取得します" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
copy_metadata_visible.change( |
|
|
fn=lambda x: x, |
|
|
inputs=[copy_metadata_visible], |
|
|
outputs=[copy_metadata], |
|
|
) |
|
|
|
|
|
|
|
|
copy_metadata.change( |
|
|
fn=lambda x: x, |
|
|
inputs=[copy_metadata], |
|
|
outputs=[copy_metadata_visible], |
|
|
queue=False, |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
prompt = gr.Textbox( |
|
|
label=translate( |
|
|
"Prompt - 生成する動画の動きを指示するプロンプト" |
|
|
), |
|
|
value=get_default_startup_prompt(), |
|
|
lines=6, |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " |
|
|
+ translate("生成動画をテンソルデータの前後どちらに結合するか"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
combine_mode = gr.Radio( |
|
|
choices=COMBINE_MODE_OPTIONS_KEYS, |
|
|
value=COMBINE_MODE_DEFAULT, |
|
|
label=translate("生成動画の結合箇所"), |
|
|
interactive=True, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
trim_start_latent_size = gr.Slider( |
|
|
label=translate("テンソルデータの先頭の削除フレーム数"), |
|
|
minimum=0, |
|
|
maximum=5, |
|
|
value=0, |
|
|
step=1, |
|
|
interactive=False, |
|
|
visible=True, |
|
|
info=translate( |
|
|
"テンソルデータの先頭から削除するフレーム数。用途:生成動画をテンソル動画の先頭に結合するケースで、テンソルデータの先頭部分にノイズがあるとわかっている際に、設定してください。出力動画の品質を確認して長さを調整してください。" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("バッチ設定"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
batch_count = gr.Slider( |
|
|
label=translate("バッチ処理回数"), |
|
|
minimum=1, |
|
|
maximum=100, |
|
|
value=1, |
|
|
step=1, |
|
|
info=translate( |
|
|
"同じ設定で連続生成する回数。SEEDは各回で+1されます" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
start_button = gr.Button( |
|
|
value=translate("Start Generation"), |
|
|
variant="primary", |
|
|
interactive=False, |
|
|
) |
|
|
end_button = gr.Button( |
|
|
value=translate("End Generation"), interactive=False |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("詳細設定"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Accordion( |
|
|
"", |
|
|
open=False, |
|
|
elem_classes="section-accordion", |
|
|
): |
|
|
|
|
|
use_random_seed_default = True |
|
|
seed_default = ( |
|
|
random.randint(0, 2**32 - 1) |
|
|
if use_random_seed_default |
|
|
else 1 |
|
|
) |
|
|
|
|
|
with gr.Row(variant="compact"): |
|
|
use_random_seed = gr.Checkbox( |
|
|
label=translate("Use Random Seed"), |
|
|
value=use_random_seed_default, |
|
|
) |
|
|
seed = gr.Number( |
|
|
label=translate("Seed"), value=seed_default, precision=0 |
|
|
) |
|
|
|
|
|
def set_random_seed(is_checked): |
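                        """Return a fresh random seed when the checkbox is on, else leave the field as-is."""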
|
|
if is_checked: |
|
|
return random.randint(0, 2**32 - 1) |
|
|
else: |
|
|
return gr.update() |
|
|
|
|
|
use_random_seed.change( |
|
|
fn=set_random_seed, inputs=use_random_seed, outputs=seed |
|
|
) |
|
|
|
|
|
steps = gr.Slider( |
|
|
label=translate("Steps"), |
|
|
minimum=1, |
|
|
maximum=100, |
|
|
value=25, |
|
|
step=1, |
|
|
info=translate("Changing this value is not recommended."), |
|
|
) |
|
|
|
|
|
cfg = gr.Slider( |
|
|
label=translate("CFG Scale"), |
|
|
minimum=1.0, |
|
|
maximum=32.0, |
|
|
value=1.0, |
|
|
step=0.01, |
|
|
visible=False, |
|
|
) |
|
|
|
|
|
gs = gr.Slider( |
|
|
label=translate("Distilled CFG Scale"), |
|
|
minimum=1.0, |
|
|
maximum=32.0, |
|
|
value=10.0, |
|
|
step=0.01, |
|
|
info=translate("Changing this value is not recommended."), |
|
|
) |
|
|
|
|
|
rs = gr.Slider( |
|
|
label=translate("CFG Re-Scale"), |
|
|
minimum=0.0, |
|
|
maximum=1.0, |
|
|
value=0.0, |
|
|
step=0.01, |
|
|
visible=False, |
|
|
) |
|
|
|
|
|
|
|
|
end_frame_strength = gr.Slider( |
|
|
label=translate("EndFrame影響度"), |
|
|
minimum=0.01, |
|
|
maximum=1.00, |
|
|
value=1.00, |
|
|
step=0.01, |
|
|
info=translate( |
|
|
"最終フレームが動画全体に与える影響の強さを調整します。値を小さくすると最終フレームの影響が弱まり、最初のフレームに早く移行します。1.00が通常の動作です。" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
with gr.Row(): |
|
|
|
|
|
with gr.Group(visible=has_lora_support) as lora_settings_group: |
|
|
gr.Markdown( |
|
|
"### " + translate("LoRA設定"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
|
|
|
|
|
|
use_lora = gr.Checkbox( |
|
|
label=translate("LoRAを使用する"), |
|
|
value=False, |
|
|
info=translate( |
|
|
"チェックをオンにするとLoRAを使用します(要16GB VRAM以上)" |
|
|
), |
|
|
) |
|
|
|
|
|
|
|
|
lora_mode = gr.Radio( |
|
|
choices=[ |
|
|
translate("ディレクトリから選択"), |
|
|
translate("ファイルアップロード"), |
|
|
], |
|
|
value=translate("ディレクトリから選択"), |
|
|
label=translate("LoRA読み込み方式"), |
|
|
visible=False, |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as lora_upload_group: |
|
|
|
|
|
lora_files = gr.File( |
|
|
label=translate( |
|
|
"LoRAファイル (.safetensors, .pt, .bin)" |
|
|
), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
) |
|
|
|
|
|
lora_files2 = gr.File( |
|
|
label=translate( |
|
|
"LoRAファイル2 (.safetensors, .pt, .bin)" |
|
|
), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
) |
|
|
|
|
|
lora_files3 = gr.File( |
|
|
label=translate( |
|
|
"LoRAファイル3 (.safetensors, .pt, .bin)" |
|
|
), |
|
|
file_types=[".safetensors", ".pt", ".bin"], |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=False) as lora_dropdown_group: |
|
|
|
|
|
lora_dropdown1 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 1"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False, |
|
|
) |
|
|
lora_dropdown2 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 2"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False, |
|
|
) |
|
|
lora_dropdown3 = gr.Dropdown( |
|
|
label=translate("LoRAモデル選択 3"), |
|
|
choices=[], |
|
|
value=None, |
|
|
allow_custom_value=False, |
|
|
) |
|
|
|
|
|
lora_scan_button = gr.Button( |
|
|
translate("LoRAディレクトリを再スキャン"), |
|
|
variant="secondary", |
|
|
) |
|
|
|
|
|
|
|
|
lora_scales_text = gr.Textbox( |
|
|
label=translate("LoRA適用強度 (カンマ区切り)"), |
|
|
value="0.8,0.8,0.8", |
|
|
info=translate( |
|
|
"各LoRAのスケール値をカンマ区切りで入力 (例: 0.8,0.5,0.3)" |
|
|
), |
|
|
visible=False, |
|
|
) |
|
|
lora_blocks_type = gr.Dropdown( |
|
|
label=translate("LoRAブロック選択"), |
|
|
choices=[ |
|
|
"all", |
|
|
"single_blocks", |
|
|
"double_blocks", |
|
|
"db0-9", |
|
|
"db10-19", |
|
|
"sb0-9", |
|
|
"sb10-19", |
|
|
"important", |
|
|
], |
|
|
value="all", |
|
|
info=translate( |
|
|
"選択するブロックタイプ(all=すべて、その他=メモリ節約)" |
|
|
), |
|
|
visible=False, |
|
|
) |
|
|
|
|
|
|
|
|
def scan_lora_directory(): |
|
|
"""./loraディレクトリからLoRAモデルファイルを検索する関数""" |
|
|
lora_dir = os.path.join( |
|
|
os.path.dirname(os.path.abspath(__file__)), "lora" |
|
|
) |
|
|
choices = [] |
|
|
|
|
|
|
|
|
if not os.path.exists(lora_dir): |
|
|
os.makedirs(lora_dir, exist_ok=True) |
|
|
print( |
|
|
translate( |
|
|
"[INFO] LoRAディレクトリが存在しなかったため作成しました: {0}" |
|
|
).format(lora_dir) |
|
|
) |
|
|
|
|
|
|
|
|
for filename in os.listdir(lora_dir): |
|
|
if filename.endswith( |
|
|
(".safetensors", ".pt", ".bin") |
|
|
): |
|
|
choices.append(filename) |
|
|
|
|
|
|
|
|
choices = sorted(choices) |
|
|
|
|
|
|
|
|
none_choice = translate("なし") |
|
|
choices.insert(0, none_choice) |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 選択肢の型変換が必要: インデックス {0}, 型 {1}, 値 {2}" |
|
|
).format(i, type(choice).__name__, choice) |
|
|
) |
|
|
|
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[INFO] LoRAディレクトリから{0}個のモデルを検出しました" |
|
|
).format(len(choices) - 1) |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 'なし'の値: {0!r}, 型: {1}" |
|
|
).format(choices[0], type(choices[0]).__name__) |
|
|
) |
|
|
|
|
|
|
|
|
if not isinstance(choices[0], str): |
|
|
print( |
|
|
translate( |
|
|
"[重要警告] 'なし'の選択肢が文字列型ではありません!型: {0}" |
|
|
).format(type(choices[0]).__name__) |
|
|
) |
|
|
|
|
|
|
|
|
if choices[0] == 0 or choices[0] == 0.0: |
|
|
print( |
|
|
translate( |
|
|
"[重要警告] 'なし'の選択肢が数値0になっています。修正します。" |
|
|
) |
|
|
) |
|
|
choices[0] = none_choice |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] scan_lora_directory戻り値: 型={0}, 最初の要素={1!r}" |
|
|
).format( |
|
|
type(choices).__name__, |
|
|
choices[0] if choices else "なし", |
|
|
) |
|
|
) |
|
|
|
|
|
return choices |
|
|
|
|
|
|
|
|
def toggle_lora_settings(use_lora): |
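                                """Show or hide the LoRA controls to match the use_lora checkbox."""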
|
|
if use_lora: |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_settings - 選択肢リスト: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_settings - 選択肢を文字列に変換: インデックス {0}, 元の値 {1}, 型 {2}" |
|
|
).format( |
|
|
i, choice, type(choice).__name__ |
|
|
) |
|
|
) |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
return [ |
|
|
gr.update(visible=True), |
|
|
gr.update( |
|
|
visible=False |
|
|
), |
|
|
gr.update( |
|
|
visible=True |
|
|
), |
|
|
gr.update(visible=True), |
|
|
] |
|
|
else: |
|
|
|
|
|
return [ |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=False), |
|
|
] |
|
|
|
|
|
|
|
|
def toggle_lora_mode(mode): |
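                                """Switch between the file-upload UI and the directory-dropdown UI."""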
|
|
if mode == translate("ディレクトリから選択"): |
|
|
|
|
|
|
|
|
choices = scan_lora_directory() |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_mode - 選択肢リスト: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_mode - 選択肢を文字列に変換: インデックス {0}, 元の値 {1}, 型 {2}" |
|
|
).format( |
|
|
i, choice, type(choice).__name__ |
|
|
) |
|
|
) |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
first_choice = choices[0] |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_mode - 変換後の最初の選択肢: {0}, 型: {1}" |
|
|
).format( |
|
|
first_choice, type(first_choice).__name__ |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
return [ |
|
|
gr.update(visible=False), |
|
|
gr.update(visible=True), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
] |
|
|
else: |
|
|
|
|
|
return [ |
|
|
gr.update(visible=True), |
|
|
gr.update(visible=False), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
] |
|
|
|
|
|
|
|
|
def update_lora_dropdowns(): |
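                                """Rescan ./lora and push the fresh choices into all three dropdowns."""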
|
|
choices = scan_lora_directory() |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] LoRAドロップダウン更新 - 選択肢: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 最初の選択肢: {0}, 型: {1}" |
|
|
).format(choices[0], type(choices[0]).__name__) |
|
|
) |
|
|
|
|
|
|
|
|
for i, choice in enumerate(choices): |
|
|
if not isinstance(choice, str): |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] update_lora_dropdowns - 選択肢を文字列に変換: インデックス {0}, 値 {1}, 型 {2}" |
|
|
).format(i, choice, type(choice).__name__) |
|
|
) |
|
|
choices[i] = str(choice) |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] update_lora_dropdowns - ドロップダウン更新完了。選択肢: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
|
|
|
return [ |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
] |
|
|
|
|
|
|
|
|
def initialize_lora_dropdowns(use_lora_val): |
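                                """Populate the dropdowns at startup when LoRA is already enabled."""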
|
|
|
|
|
if use_lora_val: |
|
|
print( |
|
|
translate( |
|
|
"[INFO] UIの初期化時にLoRAドロップダウンを更新します" |
|
|
) |
|
|
) |
|
|
return update_lora_dropdowns() |
|
|
return [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
previous_lora_mode = translate( |
|
|
"ディレクトリから選択" |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_lora_full_update(use_lora_val): |
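                                """Handle use_lora toggles: restore the previous load mode and refresh the dropdowns."""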
|
|
|
|
|
global previous_lora_mode |
|
|
|
|
|
|
|
|
|
|
|
if not use_lora_val: |
|
|
|
|
|
current_mode = getattr( |
|
|
lora_mode, |
|
|
"value", |
|
|
translate("ディレクトリから選択"), |
|
|
) |
|
|
if current_mode: |
|
|
previous_lora_mode = current_mode |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 前回のLoRAモードを保存: {0}" |
|
|
).format(previous_lora_mode) |
|
|
) |
|
|
|
|
|
|
|
|
settings_updates = toggle_lora_settings(use_lora_val) |
|
|
|
|
|
|
|
|
if use_lora_val: |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] LoRAが有効になりました。前回のモード: {0}" |
|
|
).format(previous_lora_mode) |
|
|
) |
|
|
|
|
|
|
|
|
if previous_lora_mode == translate( |
|
|
"ファイルアップロード" |
|
|
): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 前回のモードはファイルアップロードだったため、ファイルアップロードUIを表示します" |
|
|
) |
|
|
) |
|
|
|
|
|
settings_updates[0] = gr.update( |
|
|
visible=True, |
|
|
value=translate("ファイルアップロード"), |
|
|
) |
|
|
settings_updates[1] = gr.update( |
|
|
visible=True |
|
|
) |
|
|
settings_updates[2] = gr.update( |
|
|
visible=False |
|
|
) |
|
|
|
|
|
|
|
|
return settings_updates + [ |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
] |
|
|
else: |
|
|
|
|
|
choices = scan_lora_directory() |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] toggle_lora_full_update - LoRAドロップダウン選択肢: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
|
|
|
|
|
|
dropdown_updates = [ |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
] |
|
|
|
|
|
|
|
|
settings_updates[0] = gr.update( |
|
|
visible=True, |
|
|
value=translate("ディレクトリから選択"), |
|
|
) |
|
|
return settings_updates + dropdown_updates |
|
|
|
|
|
|
|
|
return settings_updates + [ |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
gr.update(), |
|
|
] |
|
|
|
|
|
|
|
|
use_lora.change( |
|
|
fn=toggle_lora_full_update, |
|
|
inputs=[use_lora], |
|
|
outputs=[ |
|
|
lora_mode, |
|
|
lora_upload_group, |
|
|
lora_dropdown_group, |
|
|
lora_scales_text, |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
], |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_lora_mode_with_memory(mode_value): |
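                                """Remember the chosen LoRA load mode, then delegate to toggle_lora_mode."""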
|
|
|
|
|
global previous_lora_mode |
|
|
previous_lora_mode = mode_value |
|
|
print( |
|
|
translate("[DEBUG] LoRAモードを変更: {0}").format( |
|
|
mode_value |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
return toggle_lora_mode(mode_value) |
|
|
|
|
|
|
|
|
lora_mode.change( |
|
|
fn=toggle_lora_mode_with_memory, |
|
|
inputs=[lora_mode], |
|
|
outputs=[ |
|
|
lora_upload_group, |
|
|
lora_dropdown_group, |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
], |
|
|
) |
|
|
|
|
|
|
|
|
lora_scan_button.click( |
|
|
fn=update_lora_dropdowns, |
|
|
inputs=[], |
|
|
outputs=[ |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
], |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
def lora_ready_init(): |
|
|
"""LoRAドロップダウンの初期化を行う関数""" |
|
|
print( |
|
|
translate( |
|
|
"[INFO] LoRAドロップダウンの初期化を開始します" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
use_lora_value = getattr(use_lora, "value", False) |
|
|
lora_mode_value = getattr( |
|
|
lora_mode, |
|
|
"value", |
|
|
translate("ディレクトリから選択"), |
|
|
) |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 初期化時の状態 - use_lora: {0}, lora_mode: {1}" |
|
|
).format(use_lora_value, lora_mode_value) |
|
|
) |
|
|
|
|
|
|
|
|
global previous_lora_mode |
|
|
previous_lora_mode = lora_mode_value |
|
|
|
|
|
if use_lora_value: |
|
|
|
|
|
if lora_mode_value == translate( |
|
|
"ディレクトリから選択" |
|
|
): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[INFO] ディレクトリから選択モードでLoRAが有効なため、ドロップダウンを初期化します" |
|
|
) |
|
|
) |
|
|
choices = scan_lora_directory() |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] 初期化時のLoRA選択肢: {0}" |
|
|
).format(choices) |
|
|
) |
|
|
return [ |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
gr.update( |
|
|
choices=choices, value=choices[0] |
|
|
), |
|
|
] |
|
|
else: |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[INFO] ファイルアップロードモードでLoRAが有効なため、ドロップダウンは更新しません" |
|
|
) |
|
|
) |
|
|
return [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
return [gr.update(), gr.update(), gr.update()] |
|
|
|
|
|
|
|
|
lora_init_btn = gr.Button( |
|
|
visible=False, elem_id="lora_init_btn" |
|
|
) |
|
|
lora_init_btn.click( |
|
|
fn=lora_ready_init, |
|
|
inputs=[], |
|
|
outputs=[ |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
], |
|
|
) |
|
|
|
|
|
|
|
|
js_init_code = """ |
|
|
function initLoraDropdowns() { |
|
|
// UIロード後、少し待ってからボタンをクリック |
|
|
setTimeout(function() { |
|
|
// 非表示ボタンを探して自動クリック |
|
|
var initBtn = document.getElementById('lora_init_btn'); |
|
|
if (initBtn) { |
|
|
console.log('LoRAドロップダウン初期化ボタンを自動実行します'); |
|
|
initBtn.click(); |
|
|
} else { |
|
|
console.log('LoRAドロップダウン初期化ボタンが見つかりません'); |
|
|
} |
|
|
}, 1000); // 1秒待ってから実行 |
|
|
} |
|
|
|
|
|
// ページロード時に初期化関数を呼び出し |
|
|
window.addEventListener('load', initLoraDropdowns); |
|
|
""" |
|
|
|
|
|
|
|
|
gr.HTML(f"<script>{js_init_code}</script>") |
|
|
|
|
|
|
|
|
if not has_lora_support: |
|
|
gr.Markdown( |
|
|
translate( |
|
|
"LoRAサポートは現在無効です。lora_utilsモジュールが必要です。" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
def toggle_combine_mode_update(mode): |
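                """Show the start-trim slider only for the first combine-mode option (front combination)."""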
|
|
if mode == COMBINE_MODE_OPTIONS_KEYS[0]: |
|
|
return gr.update(visible=True) |
|
|
else: |
|
|
return gr.update(visible=False) |
|
|
|
|
|
|
|
|
combine_mode.change( |
|
|
fn=toggle_combine_mode_update, |
|
|
inputs=[combine_mode], |
|
|
outputs=[trim_start_latent_size], |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
input_image.change( |
|
|
fn=update_from_image_metadata, |
|
|
inputs=[input_image, copy_metadata], |
|
|
outputs=[prompt, seed], |
|
|
) |
|
|
|
|
|
|
|
|
def check_metadata_on_checkbox_change(copy_enabled, image_path): |
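                """Re-read metadata from the current image when the copy checkbox is switched on."""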
|
|
if not copy_enabled or image_path is None: |
|
|
return [gr.update()] * 2 |
|
|
|
|
|
return update_from_image_metadata(image_path, copy_enabled) |
|
|
|
|
|
copy_metadata.change( |
|
|
fn=check_metadata_on_checkbox_change, |
|
|
inputs=[copy_metadata, input_image], |
|
|
outputs=[prompt, seed], |
|
|
) |
|
|
|
|
|
def check_inputs_required(image_file, tensor_file): |
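                """Enable the start button only when both an image and a tensor file are set."""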
|
|
return gr.update(interactive=bool(image_file and tensor_file)) |
|
|
|
|
|
tensor_data_input.change( |
|
|
check_inputs_required, |
|
|
inputs=[input_image, tensor_data_input], |
|
|
outputs=[start_button], |
|
|
) |
|
|
|
|
|
input_image.change( |
|
|
check_inputs_required, |
|
|
inputs=[input_image, tensor_data_input], |
|
|
outputs=[start_button], |
|
|
) |
|
|
|
|
|
def process_tensor_file(file): |
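                """Load an uploaded .safetensors file and render a summary into the preview area."""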
|
|
|
|
|
if file is not None: |
|
|
try: |
|
|
                        # gr.File may deliver a filepath string or an object
                        # with a .name attribute, depending on its settings
                        tensor_path = file.name if hasattr(file, "name") else file
|
|
print( |
|
|
translate("テンソルデータを読み込み: {0}").format( |
|
|
                                os.path.basename(tensor_path)
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
tensor_dict = sf.load_file(tensor_path) |
|
|
|
|
|
|
|
|
print(translate("テンソルデータの内容:")) |
|
|
for key, tensor in tensor_dict.items(): |
|
|
print( |
|
|
f" - {key}: shape={tensor.shape}, dtype={tensor.dtype}" |
|
|
) |
|
|
|
|
|
|
|
|
if "history_latents" in tensor_dict: |
|
|
|
|
|
uploaded_tensor_latents = tensor_dict["history_latents"] |
|
|
if uploaded_tensor_latents.shape[2] > 0: |
|
|
print( |
|
|
translate( |
|
|
"テンソルデータに 'history_latents' が見つかりました。{}" |
|
|
).format(uploaded_tensor_latents.shape) |
|
|
) |
|
|
|
|
|
metadata = ( |
|
|
[ |
|
|
str(v) |
|
|
for v in tensor_dict["metadata"].tolist() |
|
|
] |
|
|
if "metadata" in tensor_dict |
|
|
else ["metadata is not included"] |
|
|
) |
|
|
tensor_info = translate("""#### テンソルデータファイル情報: |
|
|
- keys: {keys} |
|
|
- history_latents: {history_latents_shape} |
|
|
- metadata: {metadata} |
|
|
""").format( |
|
|
keys=", ".join(list(tensor_dict.keys())), |
|
|
history_latents_shape=tensor_dict[ |
|
|
"history_latents" |
|
|
].shape, |
|
|
metadata=", ".join(metadata), |
|
|
) |
|
|
|
|
|
return gr.update(visible=True, value=tensor_info) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"異常: テンソルデータに 'history_latents' が見つかりましたが、サイズが0です。" |
|
|
) |
|
|
) |
|
|
return gr.update(visible=False) |
|
|
else: |
|
|
print( |
|
|
translate( |
|
|
"異常: テンソルデータに 'history_latents' キーが見つかりません" |
|
|
) |
|
|
) |
|
|
return gr.update(visible=False) |
|
|
except Exception as e: |
|
|
print( |
|
|
translate("テンソルデータ読み込みエラー: {0}").format(e) |
|
|
) |
|
|
                        # traceback is already imported at module level
                        traceback.print_exc()
|
|
return gr.update(visible=False) |
|
|
else: |
|
|
|
|
|
return gr.update( |
|
|
visible=True, |
|
|
value=translate( |
|
|
"ファイルを解除しました(エラーの場合も含む)。" |
|
|
), |
|
|
) |
|
|
|
|
|
tensor_data_input.change( |
|
|
process_tensor_file, |
|
|
inputs=tensor_data_input, |
|
|
outputs=[ |
|
|
preview_tensor_desc, |
|
|
], |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Column(): |
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("出力:生成動画"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
result_video = gr.Video( |
|
|
label=translate("Finished Frames"), |
|
|
autoplay=True, |
|
|
show_share_button=False, |
|
|
height=512, |
|
|
loop=True, |
|
|
format="mp4", |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
progress_desc = gr.Markdown( |
|
|
"", elem_classes="no-generating-animation" |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
progress_bar = gr.HTML( |
|
|
"", elem_classes="no-generating-animation" |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
preview_image = gr.Image( |
|
|
label=translate("Next Latents"), height=200, visible=False |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
section_calc_display = gr.HTML("", label="") |
|
|
|
|
|
|
|
|
with gr.Group(): |
|
|
gr.Markdown( |
|
|
"### " + translate("保存設定"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
mp4_crf = gr.Slider( |
|
|
label=translate("MP4 Compression"), |
|
|
minimum=0, |
|
|
maximum=100, |
|
|
value=16, |
|
|
step=1, |
|
|
info=translate( |
|
|
"数値が小さいほど高品質になります。0は無圧縮。黒画面が出る場合は16に設定してください。" |
|
|
), |
|
|
) |
|
|
with gr.Row(): |
|
|
|
|
|
keep_section_videos = gr.Checkbox( |
|
|
label=translate( |
|
|
"完了時に中間動画を残す - チェックがない場合は最終動画のみ保存されます(デフォルトOFF)" |
|
|
), |
|
|
value=False, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
save_tensor_data = gr.Checkbox( |
|
|
label=translate( |
|
|
"完了時にテンソルデータ(.safetensors)も保存 - このデータを別の動画の前後に結合可能" |
|
|
), |
|
|
value=False, |
|
|
info=translate( |
|
|
"チェックすると、生成されたテンソルデータを保存します。アップロードされたテンソルがあれば、結合したテンソルデータも保存されます。" |
|
|
), |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
save_intermediate_frames = gr.Checkbox( |
|
|
label=translate("中間ファイルの静止画保存"), |
|
|
value=False, |
|
|
info=translate( |
|
|
"完了時に中間ファイルを静止画として保存します" |
|
|
), |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
with gr.Row(equal_height=True): |
|
|
with gr.Column(scale=4): |
|
|
|
|
|
output_dir = gr.Textbox( |
|
|
label=translate( |
|
|
"出力フォルダ名 - 出力先は、webuiフォルダ配下に限定されます" |
|
|
), |
|
|
value=output_folder_name, |
|
|
info=translate( |
|
|
"動画やキーフレーム画像の保存先フォルダ名" |
|
|
), |
|
|
placeholder="outputs", |
|
|
) |
|
|
with gr.Column(scale=1, min_width=100): |
|
|
open_folder_btn = gr.Button( |
|
|
value=translate("📂 保存および出力フォルダを開く"), |
|
|
size="sm", |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Row(visible=False): |
|
|
path_display = gr.Textbox( |
|
|
label=translate("出力フォルダの完全パス"), |
|
|
value=os.path.join(base_path, output_folder_name), |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
|
|
|
def handle_open_folder_btn(folder_name): |
|
|
"""フォルダ名を保存し、そのフォルダを開く""" |
|
|
if not folder_name or not folder_name.strip(): |
|
|
folder_name = "outputs" |
|
|
|
|
|
|
|
|
folder_path = get_output_folder_path(folder_name) |
|
|
|
|
|
|
|
|
settings = load_settings() |
|
|
old_folder_name = settings.get("output_folder") |
|
|
|
|
|
if old_folder_name != folder_name: |
|
|
settings["output_folder"] = folder_name |
|
|
save_result = save_settings(settings) |
|
|
if save_result: |
|
|
|
|
|
global output_folder_name, outputs_folder |
|
|
output_folder_name = folder_name |
|
|
outputs_folder = folder_path |
|
|
print( |
|
|
translate( |
|
|
"出力フォルダ設定を保存しました: {folder_name}" |
|
|
).format(folder_name=folder_name) |
|
|
) |
|
|
|
|
|
|
|
|
open_output_folder(folder_path) |
|
|
|
|
|
|
|
|
return gr.update(value=folder_name), gr.update( |
|
|
value=folder_path |
|
|
) |
|
|
|
|
|
open_folder_btn.click( |
|
|
fn=handle_open_folder_btn, |
|
|
inputs=[output_dir], |
|
|
outputs=[output_dir, path_display], |
|
|
) |
|
|
|
|
|
with gr.Tab("Performance"): |
|
|
|
|
|
with gr.Row(): |
|
|
fp8_optimization = gr.Checkbox( |
|
|
label=translate("FP8 最適化"), |
|
|
value=True, |
|
|
info=translate( |
|
|
"メモリ使用量を削減し速度を改善(PyTorch 2.1以上が必要)" |
|
|
), |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
available_cuda_memory_gb = round( |
|
|
torch.cuda.get_device_properties(0).total_memory / (1024**3) |
|
|
) |
|
|
default_gpu_memory_preservation_gb = ( |
|
|
6 |
|
|
if available_cuda_memory_gb >= 20 |
|
|
else (8 if available_cuda_memory_gb > 16 else 10) |
|
|
) |
|
|
gpu_memory_preservation = gr.Slider( |
|
|
label=translate( |
|
|
"GPU Memory to Preserve (GB) (smaller = more VRAM usage)" |
|
|
), |
|
|
minimum=6, |
|
|
maximum=128, |
|
|
value=default_gpu_memory_preservation_gb, |
|
|
step=0.1, |
|
|
info=translate( |
|
|
"空けておくGPUメモリ量を指定。小さい値=より多くのVRAMを使用可能=高速、大きい値=より少ないVRAMを使用=安全" |
|
|
), |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
use_teacache = gr.Checkbox( |
|
|
label=translate("Use TeaCache"), |
|
|
value=True, |
|
|
info=translate( |
|
|
"Faster speed, but often makes hands and fingers slightly worse." |
|
|
), |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
use_vae_cache = gr.Checkbox( |
|
|
label=translate("VAEキャッシュを使用"), |
|
|
value=False, |
|
|
info=translate( |
|
|
"デコードを1フレームずつ処理し、速度向上(メモリ使用量増加。VRAM24GB以上推奨。それ以下の場合、メモリスワップで逆に遅くなります)" |
|
|
), |
|
|
) |
|
|
print(f"VAEキャッシュCheckbox初期化: id={id(use_vae_cache)}") |
|
|
|
|
|
def update_vae_cache_state(value): |
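                """Keep the module-level vae_cache_enabled flag in sync with the checkbox."""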
|
|
global vae_cache_enabled |
|
|
vae_cache_enabled = value |
|
|
print(f"VAEキャッシュ状態を更新: {vae_cache_enabled}") |
|
|
return None |
|
|
|
|
|
|
|
|
use_vae_cache.change( |
|
|
fn=update_vae_cache_state, inputs=[use_vae_cache], outputs=[] |
|
|
) |
|
|
|
|
|
|
|
|
from eichi_utils import create_vae_settings_ui, get_current_vae_settings_display |
|
|
|
|
|
vae_settings_accordion, vae_controls = create_vae_settings_ui(translate) |
|
|
|
|
|
|
|
|
def update_vae_settings_display(): |
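            """Return a text summary of the current VAE settings, if a VAE is loaded."""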
|
|
global vae |
|
|
if vae is not None: |
|
|
current_settings = get_current_vae_settings_display(vae) |
|
|
return current_settings |
|
|
return "VAEがロードされていません" |
|
|
|
|
|
|
|
|
vae_controls["current_settings_md"].value = update_vae_settings_display() |
|
|
|
|
|
with gr.Tab("Presets"): |
|
|
|
|
|
with gr.Group(visible=True) as prompt_management: |
|
|
gr.Markdown( |
|
|
"### " + translate("プロンプト管理"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=True): |
|
|
|
|
|
default_prompt = "" |
|
|
default_name = "" |
|
|
for preset in load_presets()["presets"]: |
|
|
if preset.get("is_startup_default", False): |
|
|
default_prompt = preset["prompt"] |
|
|
default_name = preset["name"] |
|
|
break |
|
|
|
|
|
with gr.Row(): |
|
|
edit_name = gr.Textbox( |
|
|
label=translate("プリセット名"), |
|
|
placeholder=translate("名前を入力..."), |
|
|
value=default_name, |
|
|
) |
|
|
|
|
|
edit_prompt = gr.Textbox( |
|
|
label=translate("プロンプト"), lines=5, value=default_prompt |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
default_preset = translate("起動時デフォルト") |
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [ |
|
|
name |
|
|
for name in choices |
|
|
if any( |
|
|
p["name"] == name and p.get("is_default", False) |
|
|
for p in presets_data["presets"] |
|
|
) |
|
|
] |
|
|
user_presets = [ |
|
|
name for name in choices if name not in default_presets |
|
|
] |
|
|
sorted_choices = [ |
|
|
(name, name) |
|
|
for name in sorted(default_presets) + sorted(user_presets) |
|
|
] |
|
|
preset_dropdown = gr.Dropdown( |
|
|
label=translate("プリセット"), |
|
|
choices=sorted_choices, |
|
|
value=default_preset, |
|
|
type="value", |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
save_btn = gr.Button(value=translate("保存"), variant="primary") |
|
|
apply_preset_btn = gr.Button( |
|
|
value=translate("反映"), variant="primary" |
|
|
) |
|
|
clear_btn = gr.Button(value=translate("クリア")) |
|
|
delete_preset_btn = gr.Button(value=translate("削除")) |
|
|
|
|
|
|
|
|
result_message = gr.Markdown("") |
|
|
|
|
|
with gr.Tab("Tools"): |
|
|
|
|
|
with gr.Group(visible=True): |
|
|
gr.Markdown( |
|
|
"### " + translate("テンソルファイルの結合"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
gr.Markdown( |
|
|
translate( |
|
|
"safetensors形式のテンソルファイルを2つ選択して結合します。結合順序は「テンソル1 + テンソル2」です。" |
|
|
), |
|
|
elem_classes="markdown-desc", |
|
|
) |
|
|
with gr.Row(): |
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("入力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
with gr.Column(scale=1): |
|
|
tool_tensor_file1 = gr.File( |
|
|
label=translate("テンソルファイル1 (.safetensors)"), |
|
|
file_types=[".safetensors"], |
|
|
height=200, |
|
|
) |
|
|
with gr.Column(scale=1): |
|
|
tool_tensor_file2 = gr.File( |
|
|
label=translate("テンソルファイル2 (.safetensors)"), |
|
|
file_types=[".safetensors"], |
|
|
height=200, |
|
|
) |
|
|
|
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("出力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
|
|
|
tool_combined_tensor_data_desc = gr.Markdown( |
|
|
"", |
|
|
elem_classes="markdown-desc", |
|
|
height=240, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
tool_combine_btn = gr.Button( |
|
|
translate("テンソルファイルを結合"), |
|
|
variant="primary", |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
def check_tool_tensor_files(file1, file2): |
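                """Enable the combine button only when both tensor files are selected."""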
|
|
|
|
|
return gr.update(interactive=bool(file1 and file2)) |
|
|
|
|
|
tool_tensor_file1.change( |
|
|
check_tool_tensor_files, |
|
|
inputs=[tool_tensor_file1, tool_tensor_file2], |
|
|
outputs=tool_combine_btn, |
|
|
) |
|
|
|
|
|
tool_tensor_file2.change( |
|
|
check_tool_tensor_files, |
|
|
inputs=[tool_tensor_file1, tool_tensor_file2], |
|
|
outputs=tool_combine_btn, |
|
|
) |
|
|
|
|
|
def combine_tensor_files(file1_path, file2_path, output_path=None): |
|
|
"""2つのsafetensorsファイルを読み込み、結合して新しいファイルに保存する |
|
|
|
|
|
Args: |
|
|
file1_path (str): 1つ目のsafetensorsファイルパス |
|
|
file2_path (str): 2つ目のsafetensorsファイルパス |
|
|
output_path (str, optional): 出力ファイルパス。指定しない場合は自動生成 |
|
|
|
|
|
Returns: |
|
|
tuple: (成功したかどうかのbool, 出力ファイルパス, 結果メッセージ) |
|
|
""" |
|
|
try: |
|
|
|
|
|
print( |
|
|
translate("ファイル1を読み込み中: {0}").format( |
|
|
os.path.basename(file1_path) |
|
|
) |
|
|
) |
|
|
tensor_dict1 = sf.load_file(file1_path) |
|
|
|
|
|
|
|
|
print( |
|
|
translate("ファイル2を読み込み中: {0}").format( |
|
|
os.path.basename(file2_path) |
|
|
) |
|
|
) |
|
|
tensor_dict2 = sf.load_file(file2_path) |
|
|
|
|
|
|
|
|
if ( |
|
|
"history_latents" in tensor_dict1 |
|
|
and "history_latents" in tensor_dict2 |
|
|
): |
|
|
tensor1 = tensor_dict1["history_latents"] |
|
|
tensor2 = tensor_dict2["history_latents"] |
|
|
|
|
|
|
|
|
print( |
|
|
translate( |
|
|
"テンソル1: shape={0}, dtype={1}, フレーム数={2}" |
|
|
).format(tensor1.shape, tensor1.dtype, tensor1.shape[2]) |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"テンソル2: shape={0}, dtype={1}, フレーム数={2}" |
|
|
).format(tensor2.shape, tensor2.dtype, tensor2.shape[2]) |
|
|
) |
|
|
|
|
|
|
|
|
if ( |
|
|
tensor1.shape[3] != tensor2.shape[3] |
|
|
or tensor1.shape[4] != tensor2.shape[4] |
|
|
): |
|
|
error_msg = translate( |
|
|
"エラー: テンソルサイズが異なります: {0} vs {1}" |
|
|
).format(tensor1.shape, tensor2.shape) |
|
|
print(error_msg) |
|
|
return False, None, error_msg |
|
|
|
|
|
|
|
|
if tensor1.dtype != tensor2.dtype: |
|
|
print( |
|
|
translate("データ型の変換: {0} → {1}").format( |
|
|
tensor2.dtype, tensor1.dtype |
|
|
) |
|
|
) |
|
|
tensor2 = tensor2.to(dtype=tensor1.dtype) |
|
|
|
|
|
|
|
|
tensor1 = tensor1.cpu() |
|
|
tensor2 = tensor2.cpu() |
|
|
|
|
|
|
|
|
combined_tensor = torch.cat([tensor1, tensor2], dim=2) |
|
|
|
|
|
|
|
|
tensor1_frames = tensor1.shape[2] |
|
|
tensor2_frames = tensor2.shape[2] |
|
|
combined_frames = combined_tensor.shape[2] |
|
|
print( |
|
|
translate( |
|
|
"結合成功: 結合後のフレーム数={0} ({1}+{2}フレーム)" |
|
|
).format(combined_frames, tensor1_frames, tensor2_frames) |
|
|
) |
|
|
|
|
|
|
|
|
height, width = tensor1.shape[3], tensor1.shape[4] |
|
|
metadata = torch.tensor( |
|
|
[height, width, combined_frames], dtype=torch.int32 |
|
|
) |
|
|
|
|
|
|
|
|
if output_path is None: |
|
|
timestamp = datetime.now().strftime("%y%m%d_%H%M%S") |
|
|
output_dir = os.path.dirname(file1_path) |
|
|
output_path = os.path.join( |
|
|
output_dir, f"{timestamp}_combined.safetensors" |
|
|
) |
|
|
|
|
|
|
|
|
tensor_dict = { |
|
|
"history_latents": combined_tensor, |
|
|
"metadata": metadata, |
|
|
} |
|
|
|
|
|
|
|
|
sf.save_file(tensor_dict, output_path) |
|
|
|
|
|
|
|
|
tensor_size_mb = ( |
|
|
combined_tensor.element_size() * combined_tensor.nelement() |
|
|
) / (1024 * 1024) |
|
|
|
|
|
|
|
|
|
|
|
metadata = ( |
|
|
[str(v) for v in tensor_dict["metadata"].tolist()] |
|
|
if "metadata" in tensor_dict |
|
|
else ["metadata is not included"] |
|
|
) |
|
|
info_text = translate("""結合成功 |
|
|
#### 結合後のテンソルファイル情報: |
|
|
- ファイル名: {filename} |
|
|
- フレーム数: {frames}フレーム ({frames1}+{frames2}フレーム) |
|
|
- サイズ: {tensor_size_mb:.2f}MB |
|
|
- keys: {keys} |
|
|
- history_latents: {history_latents_shape} |
|
|
- metadata: {metadata} |
|
|
""").format( |
|
|
filename=output_path, |
|
|
frames=combined_frames, |
|
|
frames1=tensor1_frames, |
|
|
frames2=tensor2_frames, |
|
|
tensor_size_mb=tensor_size_mb, |
|
|
keys=", ".join(list(tensor_dict.keys())), |
|
|
history_latents_shape=tensor_dict["history_latents"].shape, |
|
|
metadata=", ".join(metadata), |
|
|
) |
|
|
|
|
|
return True, output_path, info_text |
|
|
else: |
|
|
error_msg = translate( |
|
|
"エラー: テンソルファイルに必要なキー'history_latents'がありません" |
|
|
) |
|
|
print(error_msg) |
|
|
return False, None, error_msg |
|
|
|
|
|
except Exception as e: |
|
|
error_msg = translate("テンソル結合中にエラーが発生: {0}").format(e) |
|
|
print(error_msg) |
|
|
traceback.print_exc() |
|
|
return False, None, error_msg |
|
|
|
|
|
def combine_tensors(file1, file2): |
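                """Button handler: combine the two uploaded tensor files into outputs_folder."""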
|
|
if file1 is None or file2 is None: |
|
|
return translate("エラー: 2つのテンソルファイルを選択してください") |
|
|
|
|
|
                # The File components may deliver a filepath string or an object
                # with a .name attribute, depending on Gradio version/settings.
                file1_path = file1.name if hasattr(file1, "name") else file1
                file2_path = file2.name if hasattr(file2, "name") else file2
|
|
|
|
|
|
|
|
job_id = generate_timestamp() + "_combined" |
|
|
output_filename = os.path.join(outputs_folder, f"{job_id}.safetensors") |
|
|
|
|
|
success, result_path, message = combine_tensor_files( |
|
|
file1_path, file2_path, output_filename |
|
|
) |
|
|
if success: |
|
|
return message |
|
|
else: |
|
|
return translate("結合失敗: {0}").format(message) |
|
|
|
|
|
def disable_tool_combine_btn(): |
|
|
return gr.update(interactive=False) |
|
|
|
|
|
def enable_tool_combine_btn(): |
|
|
return gr.update(interactive=True) |
|
|
|
|
|
tool_combine_btn.click( |
|
|
disable_tool_combine_btn, |
|
|
inputs=[], |
|
|
outputs=tool_combine_btn, |
|
|
queue=True, |
|
|
).then( |
|
|
combine_tensors, |
|
|
inputs=[tool_tensor_file1, tool_tensor_file2], |
|
|
outputs=[tool_combined_tensor_data_desc], |
|
|
queue=True, |
|
|
).then( |
|
|
enable_tool_combine_btn, |
|
|
inputs=[], |
|
|
outputs=tool_combine_btn, |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=True): |
|
|
gr.Markdown( |
|
|
"### " + translate("テンソルファイルのMP4化"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
gr.Markdown( |
|
|
translate("safetensors形式のテンソルファイルをMP4動画にします。"), |
|
|
elem_classes="markdown-desc", |
|
|
) |
|
|
with gr.Row(): |
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("入力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
tool_tensor_data_input = gr.File( |
|
|
label=translate( |
|
|
"テンソルデータアップロード (.safetensors) - eichiにて生成したファイル" |
|
|
), |
|
|
file_types=[".safetensors"], |
|
|
type="filepath", |
|
|
height=200, |
|
|
) |
|
|
|
|
|
|
|
|
tool_preview_tensor_desc = gr.Markdown( |
|
|
"", elem_classes="markdown-desc" |
|
|
) |
|
|
|
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("出力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
tool_tensor_video = gr.Video( |
|
|
label=translate("Tensor file frames"), |
|
|
autoplay=True, |
|
|
show_share_button=False, |
|
|
height=256, |
|
|
loop=False, |
|
|
format="mp4", |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
tool_create_mp4_button = gr.Button( |
|
|
value=translate("MP4ファイル作成"), |
|
|
variant="primary", |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
def create_mp4_from_tensor(file, mp4_crf): |
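                """Decode an uploaded tensor file with the VAE and return it as an MP4 video."""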
|
|
if file is not None: |
|
|
                    tensor_path = file.name if hasattr(file, "name") else file
|
|
|
|
|
tensor_dict = sf.load_file(tensor_path) |
|
|
|
|
|
uploaded_tensor_latents = tensor_dict["history_latents"] |
|
|
job_id = generate_timestamp() + "_tensor_to_mp4" |
|
|
|
|
|
if not high_vram: |
|
|
|
|
|
preserved_memory_offload = 8.0 |
|
|
print( |
|
|
translate( |
|
|
"Offloading transformer with memory preservation: {0} GB" |
|
|
).format(preserved_memory_offload) |
|
|
) |
|
|
offload_model_from_device_for_memory_preservation( |
|
|
transformer, |
|
|
target_device=gpu, |
|
|
preserved_memory_gb=preserved_memory_offload, |
|
|
) |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
uploaded_tensor_pixels, _ = process_tensor_chunks( |
|
|
tensor=uploaded_tensor_latents, |
|
|
frames=uploaded_tensor_latents.shape[2], |
|
|
use_vae_cache=use_vae_cache.value, |
|
|
job_id=job_id, |
|
|
outputs_folder=outputs_folder, |
|
|
mp4_crf=mp4_crf, |
|
|
stream=stream, |
|
|
vae=vae, |
|
|
) |
|
|
|
|
|
input_tensor_output_filename = os.path.join( |
|
|
outputs_folder, f"{job_id}_input_safetensors.mp4" |
|
|
) |
|
|
save_bcthw_as_mp4( |
|
|
uploaded_tensor_pixels, |
|
|
input_tensor_output_filename, |
|
|
fps=30, |
|
|
crf=mp4_crf, |
|
|
) |
|
|
print( |
|
|
translate( |
|
|
"入力されたテンソルデータの動画を保存しました: {input_tensor_output_filename}" |
|
|
).format( |
|
|
input_tensor_output_filename=input_tensor_output_filename |
|
|
) |
|
|
) |
|
|
return gr.update(value=input_tensor_output_filename) |
|
|
else: |
|
|
return gr.update() |
|
|
|
|
|
def disable_tool_create_mp4_button(): |
|
|
return gr.update(interactive=False) |
|
|
|
|
|
def enable_tool_create_mp4_button(file): |
|
|
if file: |
|
|
return gr.update(interactive=True) |
|
|
else: |
|
|
return gr.update(interactive=False) |
|
|
|
|
|
tool_tensor_data_input.change( |
|
|
process_tensor_file, |
|
|
inputs=tool_tensor_data_input, |
|
|
outputs=tool_preview_tensor_desc, |
|
|
).then( |
|
|
enable_tool_create_mp4_button, |
|
|
inputs=tool_tensor_data_input, |
|
|
outputs=tool_create_mp4_button, |
|
|
) |
|
|
|
|
|
tool_create_mp4_button.click( |
|
|
disable_tool_create_mp4_button, |
|
|
inputs=[], |
|
|
outputs=tool_create_mp4_button, |
|
|
queue=True, |
|
|
).then( |
|
|
create_mp4_from_tensor, |
|
|
inputs=[tool_tensor_data_input, mp4_crf], |
|
|
outputs=tool_tensor_video, |
|
|
queue=True, |
|
|
).then( |
|
|
enable_tool_create_mp4_button, |
|
|
inputs=tool_tensor_data_input, |
|
|
outputs=tool_create_mp4_button, |
|
|
) |
|
|
|
|
|
|
|
|
with gr.Group(visible=True): |
|
|
gr.Markdown( |
|
|
"### " + translate("MP4ファイルのテンソルファイル化"), |
|
|
elem_classes="markdown-title", |
|
|
) |
|
|
gr.Markdown( |
|
|
translate( |
|
|
"MP4ファイルをeichiで使用可能なsafetensors形式のテンソルファイルにします。" |
|
|
), |
|
|
elem_classes="markdown-desc", |
|
|
) |
|
|
with gr.Row(): |
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("入力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
tool_mp4_data_input = gr.File( |
|
|
label=translate("MP4ファイルアップロード (.mp4)"), |
|
|
file_types=[".mp4"], |
|
|
type="filepath", |
|
|
height=200, |
|
|
) |
|
|
|
|
|
|
|
|
tool_preview_mp4_desc = gr.Markdown( |
|
|
"", elem_classes="markdown-desc" |
|
|
) |
|
|
|
|
|
with gr.Column(elem_classes="group-border"): |
|
|
gr.Markdown( |
|
|
"#### " + translate("出力"), elem_classes="markdown-subtitle" |
|
|
) |
|
|
|
|
|
tool_tensor_data_desc = gr.Markdown( |
|
|
"", |
|
|
elem_classes="markdown-desc", |
|
|
height=240, |
|
|
) |
|
|
|
|
|
with gr.Row(): |
|
|
|
|
|
tool_create_tensor_button = gr.Button( |
|
|
value=translate("テンソルファイル作成"), |
|
|
variant="primary", |
|
|
interactive=False, |
|
|
) |
|
|
|
|
|
def process_mp4_file(file): |
|
|
"""アップロードされたMP4ファイルの情報を表示する |
|
|
|
|
|
Args: |
|
|
file: アップロードされたMP4ファイルパス |
|
|
|
|
|
Returns: |
|
|
tuple: (プレビュー説明文, ボタンの有効/無効状態) |
|
|
""" |
|
|
|
|
|
if file is not None: |
|
|
try: |
|
|
mp4_path = file.name |
|
|
|
|
|
cap = cv2.VideoCapture(mp4_path) |
|
|
|
|
|
if not cap.isOpened(): |
|
|
return ( |
|
|
translate("エラー: MP4ファイルを開けませんでした"), |
|
|
gr.update(interactive=False), |
|
|
) |
|
|
|
|
|
|
|
|
frame_count = int(cap.get(cv2.CAP_PROP_FRAME_COUNT)) |
|
|
fps = cap.get(cv2.CAP_PROP_FPS) |
|
|
width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)) |
|
|
height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)) |
|
|
duration = frame_count / fps if fps > 0 else 0 |
|
|
|
|
|
|
|
|
cap.release() |
|
|
|
|
|
|
|
|
info_text = translate("""#### MP4ファイル情報: |
|
|
- ファイル名: {filename} |
|
|
- フレーム数: {frames} |
|
|
- フレームレート: {fps:.2f} fps |
|
|
- 解像度: H{height}xW{width} |
|
|
- 長さ: {duration:.2f} 秒 |
|
|
""").format( |
|
|
filename=os.path.basename(mp4_path), |
|
|
frames=frame_count, |
|
|
fps=fps, |
|
|
width=width, |
|
|
height=height, |
|
|
duration=duration, |
|
|
) |
|
|
|
|
|
|
|
|
return ( |
|
|
info_text, |
|
|
gr.update(interactive=True), |
|
|
) |
|
|
|
|
|
except Exception as e: |
|
|
error_msg = translate( |
|
|
"MP4ファイルの読み込み中にエラーが発生: {0}" |
|
|
).format(str(e)) |
|
|
print(error_msg) |
|
|
traceback.print_exc() |
|
|
return ( |
|
|
error_msg, |
|
|
gr.update(interactive=False), |
|
|
) |
|
|
|
|
|
|
|
|
return ( |
|
|
translate("MP4ファイルを選択してください"), |
|
|
gr.update(interactive=False), |
|
|
) |
|
|
|
|
|
tool_mp4_data_input.change( |
|
|
process_mp4_file, |
|
|
inputs=tool_mp4_data_input, |
|
|
outputs=[ |
|
|
tool_preview_mp4_desc, |
|
|
tool_create_tensor_button, |
|
|
], |
|
|
) |
|
|
|
|
|
def create_tensor_from_mp4(file): |
|
|
"""MP4ファイルからテンソルデータを生成する |
|
|
|
|
|
Args: |
|
|
file: アップロードされたMP4ファイルパス |
|
|
|
|
|
Returns: |
|
|
gr.update: video要素の更新情報 |
|
|
""" |
|
|
try: |
|
|
if file is not None: |
|
|
mp4_path = file.name |
|
|
job_id = generate_timestamp() + "_mp4_to_tensor" |
|
|
|
|
|
print( |
|
|
translate("[INFO] MP4ファイルの処理を開始: {0}").format( |
|
|
mp4_path |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
if not high_vram: |
|
|
load_model_as_complete(vae, target_device=gpu) |
|
|
|
|
|
|
|
|
cap = cv2.VideoCapture(mp4_path) |
|
|
frames = [] |
|
|
|
|
|
while True: |
|
|
ret, frame = cap.read() |
|
|
if not ret: |
|
|
break |
|
|
|
|
|
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) |
|
|
frames.append(frame) |
|
|
|
|
|
cap.release() |
|
|
|
|
|
|
|
|
if len(frames) == 0: |
|
|
raise ValueError( |
|
|
translate("MP4ファイルにフレームが含まれていません") |
|
|
) |
|
|
|
|
|
print( |
|
|
translate("[INFO] 読み込んだフレーム数: {0}").format( |
|
|
len(frames) |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
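# Shape pipeline: np.stack gives (T, H, W, C); permute to (C, T, H, W), map
# uint8 [0, 255] to float [-1, 1], then add a batch dim -> (1, C, T, H, W).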
frames_tensor = torch.from_numpy(np.stack(frames)) |
|
|
|
|
|
frames_tensor = frames_tensor.permute(3, 0, 1, 2) |
|
|
|
|
|
frames_tensor = frames_tensor.float() / 127.5 - 1.0 |
|
|
|
|
|
frames_tensor = frames_tensor.unsqueeze(0) |
|
|
|
|
|
print( |
|
|
translate(" - frames_tensorの形状: {0}").format( |
|
|
frames_tensor.shape |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
frames_tensor = ensure_tensor_properties( |
|
|
frames_tensor, vae.device |
|
|
) |
|
|
latents = vae_encode(frames_tensor, vae) |
|
|
latents = latents.to(torch.float16) |
|
|
|
|
|
print(translate("[INFO] VAEエンコード完了")) |
|
|
print(translate(" - Latentsの形状: {0}").format(latents.shape)) |
|
|
|
|
|
|
|
|
tensor_output_path = os.path.join( |
|
|
outputs_folder, f"{job_id}.safetensors" |
|
|
) |
|
|
|
|
|
|
|
|
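# Store [pixel height, pixel width, latent frame count] as int32 alongside the
# latents (frames_tensor is (1, C, T, H, W) here, so dims 3 and 4 are H and W).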
metadata = torch.tensor( |
|
|
[ |
|
|
frames_tensor.shape[3], |
|
|
frames_tensor.shape[4], |
|
|
latents.shape[2], |
|
|
], |
|
|
dtype=torch.int32, |
|
|
) |
|
|
|
|
|
tensor_dict = { |
|
|
"history_latents": latents, |
|
|
"metadata": metadata, |
|
|
} |
|
|
sf.save_file(tensor_dict, tensor_output_path) |
|
|
|
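# Minimal sketch of reading the file back elsewhere (assuming safetensors is
# imported as `sf`, as in this module):
#     d = sf.load_file("outputs/<job_id>.safetensors")
#     latents, meta = d["history_latents"], d["metadata"].tolist()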
|
|
print( |
|
|
translate( |
|
|
"[INFO] テンソルデータを保存しました: {0}" |
|
|
).format(tensor_output_path) |
|
|
) |
|
|
|
|
|
|
|
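# Decode the freshly encoded latents again, chiefly as a sanity check that the
# round trip produces the expected pixel shape (printed below).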
|
decoded_pixels, _ = process_tensor_chunks( |
|
|
tensor=latents, |
|
|
frames=latents.shape[2], |
|
|
use_vae_cache=vae_cache_enabled, |
|
|
job_id=job_id, |
|
|
outputs_folder=outputs_folder, |
|
|
mp4_crf=16, |
|
|
stream=stream, |
|
|
vae=vae, |
|
|
) |
|
|
|
|
|
print( |
|
|
translate(" - decoded_pixelsの形状: {0}").format( |
|
|
decoded_pixels.shape |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
metadata = ( |
|
|
[str(v) for v in tensor_dict["metadata"].tolist()] |
|
|
if "metadata" in tensor_dict |
|
|
else ["metadata is not included"] |
|
|
) |
|
|
tensor_info = translate("""#### テンソルファイル情報: |
|
|
- 出力先: {file_path} |
|
|
- keys: {keys} |
|
|
- history_latents: {history_latents_shape} |
|
|
- metadata: {metadata} |
|
|
""").format( |
|
|
file_path=tensor_output_path, |
|
|
keys=", ".join(list(tensor_dict.keys())), |
|
|
history_latents_shape=tensor_dict["history_latents"].shape, |
|
|
metadata=", ".join(metadata), |
|
|
) |
|
|
|
|
|
|
|
|
return gr.update(value=tensor_info) |
|
|
else: |
|
|
return gr.update() |
|
|
except Exception as e: |
|
|
error_msg = translate( |
|
|
"MP4からテンソルデータの生成中にエラーが発生: {0}" |
|
|
).format(str(e)) |
|
|
print(error_msg) |
|
|
traceback.print_exc() |
|
|
return gr.update() |
|
|
|
|
|
def disable_tool_create_tensor_button(): |
|
|
return gr.update(interactive=False) |
|
|
|
|
|
def enable_tool_create_tensor_button(): |
|
|
return gr.update(interactive=True) |
|
|
|
|
|
tool_create_tensor_button.click( |
|
|
disable_tool_create_tensor_button, |
|
|
inputs=[], |
|
|
outputs=tool_create_tensor_button, |
|
|
queue=True, |
|
|
).then( |
|
|
create_tensor_from_mp4, |
|
|
inputs=[tool_mp4_data_input], |
|
|
outputs=tool_tensor_data_desc, |
|
|
queue=True, |
|
|
).then( |
|
|
enable_tool_create_tensor_button, |
|
|
inputs=[], |
|
|
outputs=tool_create_tensor_button, |
|
|
) |
|
|
|
|
|
|
|
|
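# Entry point for the start button: validates the input image, normalises the
# LoRA dropdown values, clamps batch_count, then streams results from process().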
def validate_and_process( |
|
|
input_image, |
|
|
prompt, |
|
|
seed, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_preservation, |
|
|
use_teacache, |
|
|
mp4_crf=16, |
|
|
end_frame_strength=1.0, |
|
|
keep_section_videos=False, |
|
|
lora_files=None, |
|
|
lora_files2=None, |
|
|
lora_files3=None, |
|
|
lora_scales_text="0.8,0.8,0.8", |
|
|
output_dir=None, |
|
|
save_intermediate_frames=False, |
|
|
use_lora=False, |
|
|
lora_mode=None, |
|
|
lora_dropdown1=None, |
|
|
lora_dropdown2=None, |
|
|
lora_dropdown3=None, |
|
|
save_tensor_data=False, |
|
|
tensor_data_input=None, |
|
|
trim_start_latent_size=0, |
|
|
generation_latent_size=0, |
|
|
combine_mode=COMBINE_MODE_DEFAULT, |
|
|
fp8_optimization=False, |
|
|
batch_count=1, |
|
|
use_vae_cache=False, |
|
|
): |
|
|
|
|
|
global \ |
|
|
batch_stopped, \ |
|
|
queue_enabled, \ |
|
|
queue_type, \ |
|
|
prompt_queue_file_path, \ |
|
|
vae_cache_enabled, \ |
|
|
image_queue_files |
|
|
|
|
|
print("=== 入力パラメーター型情報 ===") |
|
|
print("=========================") |
|
|
"""入力画像または最後のキーフレーム画像のいずれかが有効かどうかを確認し、問題がなければ処理を実行する""" |
|
|
|
|
|
print(f"validate_and_process 引数数: {len(locals())}") |
|
|
|
|
|
|
|
|
print(f"[DEBUG] LoRA関連の引数確認") |
|
|
print(f"[DEBUG] lora_files: {lora_files}, 型: {type(lora_files)}") |
|
|
print(f"[DEBUG] lora_files2: {lora_files2}, 型: {type(lora_files2)}") |
|
|
print(f"[DEBUG] lora_files3: {lora_files3}, 型: {type(lora_files3)}") |
|
|
print( |
|
|
f"[DEBUG] lora_scales_text: {lora_scales_text}, 型: {type(lora_scales_text)}" |
|
|
) |
|
|
print(f"[DEBUG] use_lora: {use_lora}, 型: {type(use_lora)}") |
|
|
print(f"[DEBUG] lora_mode: {lora_mode}, 型: {type(lora_mode)}") |
|
|
print(f"[DEBUG] lora_dropdown1: {lora_dropdown1}, 型: {type(lora_dropdown1)}") |
|
|
print(f"[DEBUG] lora_dropdown2: {lora_dropdown2}, 型: {type(lora_dropdown2)}") |
|
|
print(f"[DEBUG] lora_dropdown3: {lora_dropdown3}, 型: {type(lora_dropdown3)}") |
|
|
|
|
|
|
|
|
print(f"[DEBUG] batch_count: {batch_count}, 型: {type(batch_count)}") |
|
|
|
|
|
|
|
|
print(f"[DEBUG] VAE cache setting: {use_vae_cache}, type: {type(use_vae_cache)}")
|
|
|
|
|
|
|
|
|
|
|
|
|
input_img = input_image |
|
|
|
|
|
|
|
|
print( |
|
|
translate("[DEBUG] batch_count 型: {0}, 値: {1}").format( |
|
|
type(batch_count).__name__, batch_count |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
use_vae_cache_ui_value = use_vae_cache |
|
|
|
|
|
|
|
|
use_vae_cache_value = vae_cache_enabled |
|
|
|
|
|
print(f"VAE cache setting (UI): {use_vae_cache_ui_value}, type: {type(use_vae_cache_ui_value)}")


print(f"VAE cache setting (global): {vae_cache_enabled}, type: {type(vae_cache_enabled)}")


print(f"Final VAE cache setting: {use_vae_cache_value}, type: {type(use_vae_cache_value)}")
|
|
|
|
|
|
|
|
|
|
|
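# Clamp batch_count to the 1..100 range; fall back to 1 for anything non-numeric.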
try: |
|
|
batch_count_val = int(batch_count) |
|
|
batch_count = max(1, min(batch_count_val, 100)) |
|
|
except (ValueError, TypeError): |
|
|
print( |
|
|
translate( |
|
|
"[WARN] validate_and_process: バッチ処理回数が無効です。デフォルト値の1を使用します: {0}" |
|
|
).format(batch_count) |
|
|
) |
|
|
batch_count = 1 |
|
|
|
|
|
|
|
|
|
|
|
print(translate("[DEBUG] validate_and_process詳細:")) |
|
|
print(f"[DEBUG] lora_dropdown1 details: value={repr(lora_dropdown1)}, type={type(lora_dropdown1).__name__}")


print(f"[DEBUG] lora_dropdown2 details: value={repr(lora_dropdown2)}, type={type(lora_dropdown2).__name__}")


print(f"[DEBUG] lora_dropdown3 details: value={repr(lora_dropdown3)}, type={type(lora_dropdown3).__name__}")
|
|
|
|
|
|
|
|
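# Gradio dropdowns can surface "no selection" as 0, "0" or 0.0 instead of the
# translated "なし" option, so each value is normalised to a string before
# deciding whether a LoRA is actually selected.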
if lora_mode == translate("ディレクトリから選択") and has_lora_support: |
|
|
|
|
|
has_dropdown_selection = False |
|
|
dropdown_values = [ |
|
|
(1, lora_dropdown1), |
|
|
(2, lora_dropdown2), |
|
|
(3, lora_dropdown3), |
|
|
] |
|
|
|
|
|
for idx, dropdown in dropdown_values: |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] ドロップダウン{0}の検出処理: 値={1!r}, 型={2}" |
|
|
).format(idx, dropdown, type(dropdown).__name__) |
|
|
) |
|
|
|
|
|
|
|
|
processed_value = dropdown |
|
|
|
|
|
|
|
|
if ( |
|
|
processed_value == 0 |
|
|
or processed_value == "0" |
|
|
or processed_value == 0.0 |
|
|
): |
|
|
|
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] validate_and_process: ドロップダウン{0}の値が数値0として検出されました。'なし'として扱います" |
|
|
).format(idx) |
|
|
) |
|
|
processed_value = translate("なし") |
|
|
|
|
|
|
|
|
if processed_value is not None and not isinstance(processed_value, str): |
|
|
processed_value = str(processed_value) |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] validate_and_process: ドロップダウン{0}の値を文字列に変換: {1!r}" |
|
|
).format(idx, processed_value) |
|
|
) |
|
|
|
|
|
|
|
|
if processed_value and processed_value != translate("なし"): |
|
|
has_dropdown_selection = True |
|
|
print( |
|
|
translate( |
|
|
"[DEBUG] validate_and_process: ドロップダウン{0}で有効な選択を検出: {1!r}" |
|
|
).format(idx, processed_value) |
|
|
) |
|
|
break |
|
|
|
|
|
|
|
|
if has_dropdown_selection: |
|
|
use_lora = True |
|
|
print( |
|
|
translate( |
|
|
"[INFO] validate_and_process: ドロップダウンでLoRAが選択されているため、LoRA使用を自動的に有効化しました" |
|
|
) |
|
|
) |
|
|
|
|
|
|
|
|
print(translate("[DEBUG] LoRA使用フラグの最終状態: {0}").format(use_lora)) |
|
|
|
|
|
|
|
|
is_valid, error_message = validate_images(input_img) |
|
|
|
|
|
if not is_valid: |
|
|
|
|
|
yield ( |
|
|
None, |
|
|
gr.update(visible=False), |
|
|
translate("エラー: 画像が選択されていません"), |
|
|
error_message, |
|
|
gr.update(interactive=True), |
|
|
gr.update(interactive=False), |
|
|
gr.update(), |
|
|
) |
|
|
return |
|
|
|
|
|
|
|
|
|
|
|
print(translate("[DEBUG] LoRA関連の引数を確認:")) |
|
|
print(translate(" - lora_files: {0}").format(lora_files)) |
|
|
print(translate(" - lora_files2: {0}").format(lora_files2)) |
|
|
print(translate(" - lora_files3: {0}").format(lora_files3)) |
|
|
print(translate(" - use_lora: {0}").format(use_lora)) |
|
|
print(translate(" - lora_mode: {0}").format(lora_mode)) |
|
|
|
|
|
|
|
|
resolution_value = 640 |
|
|
|
|
|
|
|
|
use_vae_cache = vae_cache_enabled |
|
|
print(f"最終的なVAEキャッシュ設定フラグ: {use_vae_cache}") |
|
|
|
|
|
|
|
|
print( |
|
|
translate("[DEBUG] 最終的なuse_loraフラグを{0}に設定しました").format( |
|
|
use_lora |
|
|
) |
|
|
) |
|
|
|
|
|
print(f"[DEBUG] Final flag settings - use_vae_cache: {use_vae_cache}, use_lora: {use_lora}")
|
|
|
|
|
|
|
|
yield from process( |
|
|
input_image=input_image, |
|
|
prompt=prompt, |
|
|
seed=seed, |
|
|
steps=steps, |
|
|
cfg=cfg, |
|
|
gs=gs, |
|
|
rs=rs, |
|
|
gpu_memory_preservation=gpu_memory_preservation, |
|
|
use_teacache=use_teacache, |
|
|
mp4_crf=mp4_crf, |
|
|
end_frame_strength=end_frame_strength, |
|
|
keep_section_videos=keep_section_videos, |
|
|
lora_files=lora_files, |
|
|
lora_files2=lora_files2, |
|
|
lora_files3=lora_files3, |
|
|
lora_scales_text=lora_scales_text, |
|
|
output_dir=output_dir, |
|
|
save_intermediate_frames=save_intermediate_frames, |
|
|
use_lora=use_lora, |
|
|
lora_mode=lora_mode, |
|
|
lora_dropdown1=lora_dropdown1, |
|
|
lora_dropdown2=lora_dropdown2, |
|
|
lora_dropdown3=lora_dropdown3, |
|
|
save_tensor_data=save_tensor_data, |
|
|
tensor_data_input=tensor_data_input, |
|
|
trim_start_latent_size=trim_start_latent_size, |
|
|
generation_latent_size=generation_latent_size, |
|
|
combine_mode=combine_mode, |
|
|
fp8_optimization=fp8_optimization, |
|
|
batch_count=batch_count, |
|
|
use_vae_cache=use_vae_cache, |
|
|
) |
|
|
|
|
|
|
|
|
|
|
|
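# The order of ips must match the positional parameters of validate_and_process
# exactly: Gradio passes the inputs list positionally.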
ips = [ |
|
|
input_image, |
|
|
prompt, |
|
|
seed, |
|
|
steps, |
|
|
cfg, |
|
|
gs, |
|
|
rs, |
|
|
gpu_memory_preservation, |
|
|
use_teacache, |
|
|
mp4_crf, |
|
|
end_frame_strength, |
|
|
keep_section_videos, |
|
|
lora_files, |
|
|
lora_files2, |
|
|
lora_files3, |
|
|
lora_scales_text, |
|
|
output_dir, |
|
|
save_intermediate_frames, |
|
|
use_lora, |
|
|
lora_mode, |
|
|
lora_dropdown1, |
|
|
lora_dropdown2, |
|
|
lora_dropdown3, |
|
|
save_tensor_data, |
|
|
tensor_data_input, |
|
|
trim_start_latent_size, |
|
|
generation_latent_size, |
|
|
combine_mode, |
|
|
fp8_optimization, |
|
|
batch_count, |
|
|
use_vae_cache, |
|
|
] |
|
|
|
|
|
|
|
|
print(f"use_vae_cache checkbox value: {use_vae_cache.value if hasattr(use_vae_cache, 'value') else 'no value attribute'}, id={id(use_vae_cache)}")
|
|
start_button.click( |
|
|
fn=validate_and_process, |
|
|
inputs=ips, |
|
|
outputs=[ |
|
|
result_video, |
|
|
preview_image, |
|
|
progress_desc, |
|
|
progress_bar, |
|
|
start_button, |
|
|
end_button, |
|
|
seed, |
|
|
], |
|
|
) |
|
|
end_button.click(fn=end_process, outputs=[end_button]) |
|
|
|
|
|
|
|
|
def save_button_click_handler(name, prompt_text): |
|
|
"""保存ボタンクリック時のハンドラ関数""" |
|
|
|
|
|
|
|
|
if "A character" in prompt_text and prompt_text.count("A character") > 1: |
|
|
sentences = prompt_text.split(".") |
|
|
if len(sentences) > 0: |
|
|
prompt_text = sentences[0].strip() + "." |
|
|
|
|
|
|
|
|
|
|
|
result_msg = save_preset(name, prompt_text) |
|
|
|
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [ |
|
|
n |
|
|
for n in choices |
|
|
if any( |
|
|
p["name"] == n and p.get("is_default", False) |
|
|
for p in presets_data["presets"] |
|
|
) |
|
|
] |
|
|
user_presets = [n for n in choices if n not in default_presets] |
|
|
sorted_choices = [ |
|
|
(n, n) for n in sorted(default_presets) + sorted(user_presets) |
|
|
] |
|
|
|
|
|
|
|
|
return result_msg, gr.update(choices=sorted_choices), gr.update() |
|
|
|
|
|
|
|
|
save_btn.click( |
|
|
fn=save_button_click_handler, |
|
|
inputs=[edit_name, edit_prompt], |
|
|
outputs=[result_message, preset_dropdown, prompt], |
|
|
) |
|
|
|
|
|
|
|
|
def clear_fields(): |
|
|
return gr.update(value=""), gr.update(value="") |
|
|
|
|
|
clear_btn.click(fn=clear_fields, inputs=[], outputs=[edit_name, edit_prompt]) |
|
|
|
|
|
|
|
|
def load_preset_handler(preset_name): |
|
|
|
|
|
for preset in load_presets()["presets"]: |
|
|
if preset["name"] == preset_name: |
|
|
return gr.update(value=preset_name), gr.update(value=preset["prompt"]) |
|
|
return gr.update(), gr.update() |
|
|
|
|
|
|
|
|
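# Some Gradio versions deliver a dropdown selection as a (label, value) tuple;
# unwrap it before looking the preset up by name.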
def load_preset_handler_wrapper(preset_name): |
|
|
|
|
|
if isinstance(preset_name, tuple) and len(preset_name) == 2: |
|
|
preset_name = preset_name[1] |
|
|
return load_preset_handler(preset_name) |
|
|
|
|
|
preset_dropdown.change( |
|
|
fn=load_preset_handler_wrapper, |
|
|
inputs=[preset_dropdown], |
|
|
outputs=[edit_name, edit_prompt], |
|
|
) |
|
|
|
|
|
|
|
|
def apply_to_prompt(edit_text): |
|
|
"""編集画面の内容をメインプロンプトに反映する関数""" |
|
|
|
|
|
return gr.update(value=edit_text) |
|
|
|
|
|
|
|
|
def delete_preset_handler(preset_name): |
|
|
|
|
|
if isinstance(preset_name, tuple) and len(preset_name) == 2: |
|
|
preset_name = preset_name[1] |
|
|
|
|
|
result = delete_preset(preset_name) |
|
|
|
|
|
|
|
|
presets_data = load_presets() |
|
|
choices = [preset["name"] for preset in presets_data["presets"]] |
|
|
default_presets = [ |
|
|
name |
|
|
for name in choices |
|
|
if any( |
|
|
p["name"] == name and p.get("is_default", False) |
|
|
for p in presets_data["presets"] |
|
|
) |
|
|
] |
|
|
user_presets = [name for name in choices if name not in default_presets] |
|
|
sorted_names = sorted(default_presets) + sorted(user_presets) |
|
|
updated_choices = [(name, name) for name in sorted_names] |
|
|
|
|
|
return result, gr.update(choices=updated_choices) |
|
|
|
|
|
apply_preset_btn.click(fn=apply_to_prompt, inputs=[edit_prompt], outputs=[prompt]) |
|
|
|
|
|
delete_preset_btn.click( |
|
|
fn=delete_preset_handler, |
|
|
inputs=[preset_dropdown], |
|
|
outputs=[result_message, preset_dropdown], |
|
|
) |
|
|
|
|
|
|
|
|
enable_keyframe_copy = True |
|
|
|
|
|
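# Restrict the paths Gradio may serve to the local ./outputs folder.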
allowed_paths = [ |
|
|
os.path.abspath( |
|
|
os.path.realpath(os.path.join(os.path.dirname(__file__), "./outputs")) |
|
|
) |
|
|
] |
|
|
|
|
|
|
|
|
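# A fixed port is used, so a second instance fails with "Cannot find empty
# port"; catch that OSError and print a hint instead of a raw traceback.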
try: |
|
|
block.launch( |
|
|
server_name=args.server, |
|
|
server_port=args.port, |
|
|
share=args.share, |
|
|
allowed_paths=allowed_paths, |
|
|
inbrowser=args.inbrowser, |
|
|
) |
|
|
except OSError as e: |
|
|
if "Cannot find empty port" in str(e): |
|
|
print("\n======================================================") |
|
|
print(translate("エラー: FramePack-eichiは既に起動しています。")) |
|
|
print(translate("同時に複数のインスタンスを実行することはできません。")) |
|
|
print(translate("現在実行中のアプリケーションを先に終了してください。")) |
|
|
print("======================================================\n") |
|
|
input(translate("続行するには何かキーを押してください...")) |
|
|
else: |
|
|
|
|
|
print(translate("\nエラーが発生しました: {e}").format(e=e)) |
|
|
input(translate("続行するには何かキーを押してください...")) |
|
|
|