import os
import subprocess
import sys
from datetime import datetime
from threading import Thread

import psutil
import toml  # used to save and load settings
import gradio as gr

# Add the project root directory to the Python path
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import auto_shutdown
from logger import logger

sys.path.append(os.path.join(os.path.dirname(__file__), "musubi-tuner"))

# Output-binding variables (module-level state shared with the Gradio UI).
preCacheLogger = None  # NOTE(review): appears unused in this file — confirm before removing
trainLogger = None  # NOTE(review): appears unused in this file — confirm before removing
settings_text = {"content": ""}

# File the UI settings are persisted to (TOML, relative to the working directory).
QWEN_IMAGE_SETTINGS_FILE = "qwen_image_settings.toml"
# Pre-caching subprocess handle (a Popen while a cache run is active, else None).
cache_process = None
cache_process_is_running = False  # NOTE(review): never updated in this file — confirm intent
# Training subprocess handle (a Popen while a training run is active, else None).
train_process = None
train_process_is_running = False  # NOTE(review): never updated in this file — confirm intent

def load_settings() -> dict:
    """Load persisted UI settings from QWEN_IMAGE_SETTINGS_FILE.

    Returns:
        The parsed settings dict, or an empty dict when the file is
        missing or cannot be parsed.
    """
    if not os.path.exists(QWEN_IMAGE_SETTINGS_FILE):
        return {}
    try:
        with open(QWEN_IMAGE_SETTINGS_FILE, "r", encoding="utf-8") as f:
            return toml.load(f)
    except Exception as e:
        # Previously the error was swallowed silently; warn like save_settings does
        # so a corrupt settings file is at least visible on the console.
        print(f"[WARN] 读取 {QWEN_IMAGE_SETTINGS_FILE} 失败: {e}")
        return {}

def save_settings(settings_dict):
    """Serialize *settings_dict* to TOML and write it to QWEN_IMAGE_SETTINGS_FILE.

    Failures are reported with a console warning instead of propagating.
    """
    try:
        serialized = toml.dumps(settings_dict)
        with open(QWEN_IMAGE_SETTINGS_FILE, "w", encoding="utf-8") as out:
            out.write(serialized)
    except Exception as e:
        print(f"[WARN] 保存 settings.toml 失败: {e}")

def preview_settings(settings_dict):
    """Render the settings dict as a TOML string for display in the UI preview box."""
    rendered = toml.dumps(settings_dict)
    return rendered

def writePreCacheLog(message, log_box):
    """Append *message* (timestamped) to the pre-cache log text and log it.

    Args:
        message: The message to record; coerced to str.
        log_box: Current log textbox content (a string) or None.

    Returns:
        The updated textbox content (unchanged when log_box is None).
    """
    try:
        # Ensure the message is a string.
        message = str(message)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
        # Bug fix: the previous truthiness check (`if log_box:`) skipped an
        # *empty* log_box, so messages never accumulated when the textbox
        # started out as "". Only a None log_box should be left untouched,
        # matching the original comment's stated intent.
        if log_box is not None:
            log_box = log_box + f"{timestamp}{message}\n"
        logger.info(message)
    except Exception as e:
        logger.info(f"logger error: {str(e)}")
    return log_box

def writeTrainLog(message, log_box):
    """Append *message* (timestamped) to the training log text and log it.

    Args:
        message: The message to record; coerced to str.
        log_box: Current log textbox content (a string) or None.

    Returns:
        The updated textbox content (unchanged when log_box is None).
    """
    try:
        # Ensure the message is a string.
        message = str(message)
        timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S ")
        # Bug fix: the previous truthiness check (`if log_box:`) skipped an
        # *empty* log_box, so messages never accumulated when the textbox
        # started out as "". Only a None log_box should be left untouched.
        if log_box is not None:
            log_box = log_box + f"{timestamp}{message}\n"
        logger.info(message)
    except Exception as e:
        logger.info(f"logger error: {str(e)}")
    return log_box

def start_pre_caching(settings_dict, log_box):
    """Launch the two-step pre-caching pipeline in a background thread.

    Step 1 caches latents, step 2 caches text-encoder outputs, each by
    running the corresponding musubi-tuner script as a subprocess whose
    output is streamed into the log.

    Args:
        settings_dict: Flat dict of UI settings; raises KeyError when a
            required key is missing.
        log_box: Current log textbox content.

    Returns:
        The log text updated with the startup message; output produced
        inside the worker thread is only written to the file logger.
    """
    log_box = writePreCacheLog("开始执行预缓存...", log_box)
    global cache_process

    dataset_config = settings_dict["dataset_config"]
    vae_path = settings_dict["vae_path"]
    text_encoder_model_path = settings_dict["text_encoder_model_path"]
    fp8 = settings_dict["fp8"]
    batch_size = settings_dict["batch_size"]
    train_type = settings_dict["train_type"]

    python_executable = sys.executable
    # Directory containing this script.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # Paths of the cache scripts inside the bundled musubi-tuner checkout.
    MUSUBI_DIR = os.path.join(base_dir, "musubi-tuner", "src", "musubi_tuner")
    cache_latents_path = os.path.join(MUSUBI_DIR, "qwen_image_cache_latents.py")
    cache_text_encoder_path = os.path.join(
        MUSUBI_DIR, "qwen_image_cache_text_encoder_outputs.py"
    )

    env = os.environ.copy()
    env["PYTHONPATH"] = os.pathsep.join(
        [os.path.dirname(MUSUBI_DIR), env.get("PYTHONPATH", "")]  # musubi-tuner src root
    )
    env["PYTHONIOENCODING"] = "utf-8"
    env["LOG_LEVEL"] = "DEBUG"

    cache_latents_cmd = [
        python_executable,
        cache_latents_path,
        "--dataset_config",
        dataset_config,
        "--vae",
        vae_path,
    ]

    cache_text_encoder_cmd = [
        python_executable,
        cache_text_encoder_path,
        "--dataset_config",
        dataset_config,
        "--text_encoder",
        text_encoder_model_path,
        "--batch_size",
        str(batch_size),
    ]
    if fp8:
        cache_text_encoder_cmd.append("--fp8_vl")
    if train_type == "qwen_image_edit":
        cache_latents_cmd.extend(["--edit"])
        cache_text_encoder_cmd.extend(["--edit"])
    elif train_type == "qwen_image_edit_plus":
        cache_latents_cmd.extend(["--edit_plus"])
        cache_text_encoder_cmd.extend(["--edit_plus"])

    # Run both cache passes asynchronously so the UI stays responsive.
    def run_cache():
        nonlocal log_box
        log_box = writePreCacheLog("开始执行预缓存 1/2 ...", log_box)
        log_box = writePreCacheLog(" ".join(cache_latents_cmd), log_box)

        global cache_process
        cache_process = subprocess.Popen(
            cache_latents_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,  # merge stderr into stdout for streaming
            text=True,
            env=env,
            encoding="utf-8",
            errors="ignore",
        )

        for line in cache_process.stdout:
            log_box = writePreCacheLog(line.strip(), log_box)

        return_code = cache_process.wait()
        cache_process = None
        if return_code != 0:
            log_box = writePreCacheLog(f"\n[ERROR] 命令执行失败，返回码: {return_code}\n", log_box)
        # NOTE(review): step 2 still runs even when step 1 failed — confirm intended.
        log_box = writePreCacheLog("预缓存 1/2 完成!", log_box)

        log_box = writePreCacheLog("开始执行预缓存2/2...", log_box)
        log_box = writePreCacheLog(" ".join(cache_text_encoder_cmd), log_box)
        cache_process = subprocess.Popen(
            cache_text_encoder_cmd,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            env=env,
            encoding="utf-8",
            errors="ignore",
        )

        for line in cache_process.stdout:
            log_box = writePreCacheLog(line.strip(), log_box)

        return_code = cache_process.wait()
        cache_process = None
        if return_code != 0:
            log_box = writePreCacheLog(f"\n[ERROR] 命令执行失败，返回码: {return_code}\n", log_box)
        log_box = writePreCacheLog("预缓存 2/2 完成!", log_box)

    Thread(target=run_cache).start()
    return log_box

def terminate_process_tree(proc: subprocess.Popen):
    """Terminate *proc* and every descendant process via psutil.

    Processes that have already exited are ignored; any other failure is
    reported with a console warning instead of propagating.
    """
    if proc is None:
        return
    if proc.pid is None:
        return
    try:
        root = psutil.Process(proc.pid)
        # Terminate children first, then the root process itself.
        for child in root.children(recursive=True):
            child.terminate()
        root.terminate()
    except psutil.NoSuchProcess:
        pass
    except Exception as e:
        print(f"[WARN] terminate_process_tree 出现异常: {e}")

def stop_caching(log_box):
    """Terminate a running pre-cache process, if any, and log the outcome."""
    global cache_process
    proc = cache_process
    if proc is None:
        msg = "当前没有正在进行的预缓存进程..."
    elif proc.poll() is None:
        # Still running: kill the whole process tree and clear the handle.
        terminate_process_tree(proc)
        cache_process = None
        msg = "预缓存进程已被手动终止..."
    else:
        msg = "预缓存进程已经结束，无需停止..."
    return writePreCacheLog(msg, log_box)

def make_prompt_file(
    prompt_text: str,
    w: int,
    h: int,
    seed: int,
    steps: int,
    custom_prompt_txt: bool,
    custom_prompt_path: str,
    prompt_file_upload: str = None,
    image_path: str = None,
) -> str:
    """Resolve or create the prompt file used for sample generation.

    Priority: an uploaded prompt file, then a user-supplied path (when
    custom_prompt_txt is enabled), then a generated default file built
    from the individual sampling parameters.

    Args:
        prompt_text: Prompt written into the generated default file.
        w: Sample width (``--w``).
        h: Sample height (``--h``).
        seed: Sample seed (``--d``).
        steps: Sampling step count (``--s``).
        custom_prompt_txt: Whether to honor custom_prompt_path.
        custom_prompt_path: User-specified prompt file path; may be None or empty.
        prompt_file_upload: Path of an uploaded prompt file, if any.
        image_path: Optional control image appended as ``--ci``.

    Returns:
        Path of the prompt file to pass to the trainer.
    """
    if prompt_file_upload and os.path.isfile(prompt_file_upload):
        return prompt_file_upload
    # Bug fix: guard against custom_prompt_path being None, which previously
    # crashed on .strip() when the checkbox was enabled without a path set.
    if custom_prompt_txt and custom_prompt_path and custom_prompt_path.strip():
        return custom_prompt_path.strip()

    default_prompt_path = "./qwen_image_prompt_file.txt"
    with open(default_prompt_path, "w", encoding="utf-8") as f:
        f.write("# prompt 1: for generating a cat video\n")
        line = f"{prompt_text} --w {w} --h {h} --d {seed} --s {steps}"
        if image_path:
            line = line + " --ci " + image_path
        line = line + "\n"
        f.write(line)
    return default_prompt_path

def run_wan_training(settings_dict, log_box):
    """Build the accelerate launch command for Qwen Image LoRA training
    and run it in a background thread, streaming output into the log.

    Args:
        settings_dict: Flat dict of UI settings; raises KeyError when a
            required key is missing.
        log_box: Current log textbox content.

    Returns:
        The log text updated with the startup messages; subsequent
        subprocess output is appended only inside the worker thread.
    """
    dataset_config = settings_dict["dataset_config"]
    dit_weights_path = settings_dict["dit_weights_path"]
    vae_path = settings_dict["vae_path"]
    text_encoder_model_path = settings_dict["text_encoder_model_path"]
    train_type = settings_dict["train_type"]

    learning_rate = settings_dict["learning_rate"]
    gradient_accumulation_steps = settings_dict["gradient_accumulation_steps"]
    network_dim = settings_dict["network_dim"]
    timestep_sampling = settings_dict["timestep_sampling"]
    discrete_flow_shift = settings_dict["discrete_flow_shift"]
    max_train_epochs = settings_dict["max_train_epochs"]
    save_every_n_epochs = settings_dict["save_every_n_epochs"]
    save_every_n_steps = settings_dict["save_every_n_steps"]
    output_dir = settings_dict["output_dir"]
    output_name = settings_dict["output_name"]
    enable_low_vram = settings_dict["enable_low_vram"]
    blocks_to_swap = settings_dict["blocks_to_swap"]
    generate_samples = settings_dict["generate_samples"]
    sample_prompt_text = settings_dict["sample_prompt_text"]
    sample_image_path = settings_dict["sample_image_path"]
    sample_w = settings_dict["sample_w"]
    sample_h = settings_dict["sample_h"]
    sample_seed = settings_dict["sample_seed"]
    sample_steps = settings_dict["sample_steps"]
    custom_prompt_txt = settings_dict["custom_prompt_txt"]
    custom_prompt_path = settings_dict["custom_prompt_path"]
    sample_every_n_epochs = settings_dict["sample_every_n_epochs"]
    sample_every_n_steps = settings_dict["sample_every_n_steps"]
    # Sampling reuses the training VAE weights.
    sample_vae_path = settings_dict["vae_path"]
    fp8 = settings_dict["fp8"]
    num_cpu_threads_per_process = settings_dict["num_cpu_threads_per_process"]
    num_processes = settings_dict["num_processes"]
    attention_implementation = settings_dict["attention_implementation"]
    optimizer_type = settings_dict["optimizer_type"]
    max_data_loader_n_workers = settings_dict["max_data_loader_n_workers"]
    log_type = settings_dict["log_type"]
    log_prefix = settings_dict["log_prefix"]
    log_dir = settings_dict["log_dir"]
    log_tracker_name = settings_dict["log_tracker_name"]
    offload_inactive_dit = settings_dict["offload_inactive_dit"]
    mixed_precision = settings_dict["mixed_precision"]
    sample_at_first = settings_dict["sample_at_first"]
    network_weights_path = settings_dict["network_weights_path"]
    use_network_weights = settings_dict["use_network_weights"]
    auto_shutdown_flag = settings_dict["auto_shutdown"]

    python_executable = sys.executable
    # Directory containing this script.
    base_dir = os.path.dirname(os.path.abspath(__file__))

    # Path of the training script inside the bundled musubi-tuner checkout.
    MUSUBI_DIR = os.path.join(base_dir, "musubi-tuner", "src", "musubi_tuner")
    train_network_path = os.path.join(MUSUBI_DIR, "qwen_image_train_network.py")

    env = os.environ.copy()
    env["PYTHONPATH"] = os.pathsep.join(
        [os.path.dirname(MUSUBI_DIR), env.get("PYTHONPATH", "")]  # musubi-tuner src root
    )
    env["PYTHONIOENCODING"] = "utf-8"
    env["LOG_LEVEL"] = "DEBUG"

    lr_scheduler = settings_dict["lr_scheduler"]
    lr_scheduler_num_cycles = settings_dict["lr_scheduler_num_cycles"]
    lr_warmup_steps = settings_dict["lr_warmup_steps"]
    custom_params = settings_dict["custom_params"]
    command = [
        python_executable,
        "-m",
        "accelerate.commands.launch",
        "--num_cpu_threads_per_process",
        str(num_cpu_threads_per_process),
        "--mixed_precision",
        mixed_precision,
        "--num_processes",
        str(num_processes),  # number of accelerate worker processes
        "--gpu_ids",
        "0",  # only use the first GPU
        train_network_path,
        "--dit",
        dit_weights_path,
        "--dataset_config",
        dataset_config,
        "--vae",
        vae_path,
        "--text_encoder",
        text_encoder_model_path,
        "--mixed_precision",
        mixed_precision,
        "--optimizer_type",
        optimizer_type,
        "--learning_rate",
        learning_rate,
        "--gradient_checkpointing",
        f"--gradient_accumulation_steps={gradient_accumulation_steps}",
        "--max_data_loader_n_workers",
        str(max_data_loader_n_workers),
        "--persistent_data_loader_workers",
        "--network_module",
        "networks.lora_qwen_image",
        "--network_dim",
        str(network_dim),
        "--timestep_sampling",
        timestep_sampling,
        "--discrete_flow_shift",
        str(discrete_flow_shift),
        "--max_train_epochs",
        str(max_train_epochs),
        "--save_every_n_epochs",
        str(save_every_n_epochs),
        "--save_every_n_steps",
        str(save_every_n_steps),
        "--output_dir",
        output_dir,
        "--output_name",
        output_name,
        "--seed",
        "42",
        "--log_with",
        log_type,
        "--lr_scheduler",
        lr_scheduler,
    ]
    if attention_implementation == "sdpa":
        command.extend(["--sdpa"])
    elif attention_implementation == "xformers":
        command.extend(["--xformers", "--split_attn"])
    if offload_inactive_dit:
        command.extend(["--offload_inactive_dit"])
    if enable_low_vram:
        command.extend(["--blocks_to_swap", str(blocks_to_swap)])
    if use_network_weights and network_weights_path.strip():
        command.extend(["--network_weights", network_weights_path.strip()])

    if fp8:
        command.extend(["--fp8_base"])
    if train_type == "qwen_image_edit":
        command.extend(["--edit"])
    elif train_type == "qwen_image_edit_plus":
        command.extend(["--edit_plus"])

    if generate_samples:
        prompt_file_final = make_prompt_file(
            prompt_text=sample_prompt_text,
            w=sample_w,
            h=sample_h,
            seed=sample_seed,
            steps=sample_steps,
            custom_prompt_txt=custom_prompt_txt,
            custom_prompt_path=custom_prompt_path,
            image_path=sample_image_path,
        )
        command.extend(
            [
                "--sample_prompts",
                prompt_file_final,
                "--sample_every_n_epochs",
                str(sample_every_n_epochs),
                "--sample_every_n_steps",
                str(sample_every_n_steps),
                "--vae",
                sample_vae_path,
            ]
        )

        if sample_at_first:
            command.extend(["--sample_at_first"])

    if log_dir:
        command.extend(["--logging_dir", log_dir])
    if log_prefix:
        command.extend(["--log_prefix", log_prefix])
    if log_tracker_name:
        command.extend(["--log_tracker_name", log_tracker_name])
    if lr_scheduler == "cosine_with_restarts":
        command.extend(["--lr_scheduler_num_cycles", str(lr_scheduler_num_cycles)])
    if custom_params:
        # NOTE(review): the whole custom_params string is appended as a single
        # argv element; multiple space-separated flags would need splitting
        # (e.g. shlex.split) — confirm intended usage before changing.
        command.extend([custom_params])
    if lr_scheduler == "constant_with_warmup":
        command.extend(["--lr_warmup_steps", str(lr_warmup_steps)])

    def run_and_stream_output():
        # Worker thread: launch training, stream merged stdout/stderr into the
        # log, then optionally power the machine off on success.
        nonlocal log_box
        global train_process
        train_process = subprocess.Popen(
            command,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
            text=True,
            env=env,
            encoding="utf-8",
            errors="ignore",
        )

        for line in train_process.stdout:
            log_box = writeTrainLog(line, log_box)
        return_code = train_process.wait()
        train_process = None
        if return_code != 0:
            log_box = writeTrainLog(f"\n[ERROR] 命令执行失败，返回码: {return_code}\n", log_box)
        else:
            try:
                log_box = writeTrainLog("训练完成!", log_box)
            except Exception as e:
                print(f"{e}")
            # Automatic shutdown after a successful run, when enabled in the UI.
            if auto_shutdown_flag:
                auto_shutdown.shutdown()

    log_box = writeTrainLog("开始运行 Wan LoRA训练命令...\n\n", log_box)
    log_box = writeTrainLog(" ".join(command), log_box)
    Thread(target=run_and_stream_output).start()
    return log_box

def stop_train(log_box):
    """Terminate a running training process, if any, and log the outcome."""
    global train_process
    proc = train_process
    if proc is None:
        msg = "当前没有正在进行的模型训练进程..."
    elif proc.poll() is None:
        # Still running: kill the whole process tree and clear the handle.
        terminate_process_tree(proc)
        train_process = None
        msg = "模型训练进程已被手动终止..."
    else:
        msg = "模型训练进程已经结束，无需停止..."
    return writeTrainLog(msg, log_box)

# Helper: native file-picker dialog for the path textboxes
def select_file(file_type=None):
    """Open a native file-picker dialog and return the chosen path.

    Args:
        file_type: Optional ``filetypes`` sequence for the dialog.

    Returns:
        The selected path, or an error message string when the dialog
        cannot be shown (e.g. headless environment).
    """
    try:
        import tkinter as tk
        from tkinter import filedialog

        root = tk.Tk()
        root.withdraw()  # keep the empty Tk main window hidden
        chosen = (
            filedialog.askopenfilename(filetypes=file_type)
            if file_type
            else filedialog.askopenfilename()
        )
        root.destroy()
        return chosen
    except Exception as e:
        return f"选择文件时出错: {str(e)}"

# Helper: native folder-picker dialog for the directory textboxes
def select_folder():
    """Open a native folder-picker dialog and return the chosen path.

    Returns:
        The selected directory, or an error message string when the
        dialog cannot be shown (e.g. headless environment).
    """
    try:
        import tkinter as tk
        from tkinter import filedialog

        picker_root = tk.Tk()
        picker_root.withdraw()  # keep the empty Tk main window hidden
        selected = filedialog.askdirectory()
        picker_root.destroy()
        return selected
    except Exception as e:
        return f"选择文件夹时出错: {str(e)}"

def create_ui():
    # 加载默认设置
    default_settings = {
        "train_type": "qwen_image",
        "dit_weights_path": "",
        "vae_path": "",
        "text_encoder_model_path": "",
        "fp8": False,
        "dataset_config": "",
        "batch_size": 4,
        "max_train_epochs": 4,
        "learning_rate": "2e-4",
        "lr_scheduler": "cosine_with_restarts",
        "lr_warmup_steps": 0,
        "lr_scheduler_num_cycles": 1,
        "optimizer_type": "adamw8bit",
        "network_dim": 32,
        "mixed_precision": "bf16",
        "gradient_accumulation_steps": 4,
        "timestep_sampling": "importance",
        "discrete_flow_shift": 2.2,
        "max_data_loader_n_workers": 2,
        "attention_implementation": "sdpa",
        "enable_low_vram": False,
        "blocks_to_swap": 16,
        "num_cpu_threads_per_process": 8,
        "num_processes": 1,
        "use_network_weights": False,
        "network_weights_path": "",
        "generate_samples": False,
        "sample_at_first": False,
        "sample_every_n_epochs": 1,
        "sample_every_n_steps": 0,
        "sample_prompt_text": "",
        "sample_image_path": "",
        "sample_w": 1024,
        "sample_h": 1024,
        "sample_seed": 42,
        "sample_steps": 20,
        "output_dir": "./output",
        "output_name": "qwen_image_lora",
        "save_every_n_epochs": 1,
        "save_every_n_steps": 0,
        "log_type": "tensorboard",
        "log_prefix": "",
        "log_dir": "output",
        "log_tracker_name": "",
        "offload_inactive_dit": False,
        "custom_params": "",
        "auto_shutdown": False,
        "custom_prompt_txt": False,
        "custom_prompt_path": ""
    }
    
    # 尝试加载保存的设置，如果不存在则使用默认设置
    settings = load_settings()
    for key, value in default_settings.items():
        if key not in settings:
            settings[key] = value
    
    # 保存更新后的设置
    save_settings(settings)
    
    # 创建界面组件
    with gr.Blocks(title="Qwen Image LoRA训练", theme=gr.themes.Soft()) as demo:
        # 参数预览区域放在顶部，方便用户查看
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown("# Qwen Image LoRA训练")
            with gr.Column(scale=1):
                save_btn = gr.Button("保存设置", variant="primary")
                load_btn = gr.Button("加载设置")
        
        # 设置预览框
        settings_preview = gr.Textbox(
            label="当前配置预览",
            value=preview_settings(settings),
            lines=5,
            interactive=False
        )
        
        # 使用Tabs来组织不同的配置区域
        with gr.Tabs():
            # 文件路径设置Tab
            with gr.Tab("文件路径设置"):
                # 模型文件路径组
                gr.Markdown("### 模型文件路径")
                with gr.Row():
                    # DiT权重文件路径
                    dit_weights_path = gr.Textbox(
                        label="DiT Weights Path / DiT权重文件路径",
                        value=settings["dit_weights_path"]
                    )
                    dit_weights_btn = gr.Button("选择文件")
                
                with gr.Row():
                    # VAE路径
                    vae_path = gr.Textbox(
                        label="VAE File Path / VAE文件路径",
                        value=settings["vae_path"]
                    )
                    vae_btn = gr.Button("选择文件")
                
                with gr.Row():
                    # Text Encoder模型路径
                    text_encoder_model_path = gr.Textbox(
                        label="Text Encoder Model Path / Text Encoder模型路径",
                        value=settings["text_encoder_model_path"]
                    )
                    text_encoder_btn = gr.Button("选择文件")
                
                # 数据集配置
                gr.Markdown("### 数据集配置")
                with gr.Row():
                    dataset_config = gr.Textbox(
                        label="Input toml path / 输入toml文件路径",
                        value=settings["dataset_config"]
                    )
                    dataset_config_btn = gr.Button("选择文件")
                
                # 其他路径设置
                gr.Markdown("### 其他路径")
                with gr.Row():
                    # 输出目录 - 文件夹选择器
                    output_dir = gr.Textbox(
                        label="Output Directory / 输出目录",
                        value=settings["output_dir"]
                    )
                    output_dir_btn = gr.Button("选择文件夹")
                
                # 网络权重路径
                with gr.Row(visible=settings["use_network_weights"]) as network_weights_row:
                    network_weights_path = gr.Textbox(
                        label="Weights File Path / 权重文件路径",
                        value=settings["network_weights_path"]
                    )
                    network_weights_btn = gr.Button("选择文件")
                
                # 采样图片路径
                with gr.Row(visible=settings["generate_samples"]) as sample_image_row:
                    sample_image_path = gr.Textbox(
                        label="Image Path / 图片路径（训练Qwen Image Edit时填写）",
                        value=settings["sample_image_path"]
                    )
                    sample_image_btn = gr.Button("选择文件")
                
                # 基本设置
                gr.Markdown("### 基本设置")
                with gr.Row():
                    train_type = gr.Dropdown(
                        choices=["qwen_image", "qwen_image_edit", "qwen_image_edit_plus"],
                        value=settings["train_type"],
                        label="Qwen_Image Type / 训练类型"
                    )
                    fp8 = gr.Checkbox(
                        value=settings["fp8"],
                        label="FP8 (开启FP8模式，节省显存)"
                    )
            
            # 训练参数Tab
            with gr.Tab("训练参数"):
                # 训练基本参数 - 三列布局
                gr.Markdown("### 训练基本参数")
                with gr.Row():
                    with gr.Column():
                        # 最大训练轮数 - 使用滑块
                        max_train_epochs = gr.Slider(
                            minimum=1,
                            maximum=1000,
                            step=1,
                            value=settings["max_train_epochs"],
                            label="Max Train Epochs / 最大训练轮数"
                        )
                    with gr.Column():
                        # 学习率 - 使用文本框
                        learning_rate = gr.Textbox(
                            value=settings["learning_rate"],
                            label="Learning Rate / 学习率"
                        )
                    with gr.Column():
                        # 网络维度 - 使用滑块
                        network_dim = gr.Slider(
                            minimum=2,
                            maximum=128,
                            step=1,
                            value=settings["network_dim"],
                            label="Network Dim / 网络维度"
                        )
                
                # 第二行参数
                with gr.Row():
                    with gr.Column():
                        # 学习率调度器
                        lr_scheduler = gr.Dropdown(
                            choices=["cosine_with_restarts", "linear", "cosine", "polynomial", "constant", "constant_with_warmup"],
                            value=settings["lr_scheduler"],
                            label="Learning Rate Scheduler / 学习率调度器"
                        )
                    with gr.Column():
                        # 预热步数 - 使用滑块
                        lr_warmup_steps = gr.Slider(
                            minimum=0,
                            maximum=10000,
                            step=10,
                            value=settings["lr_warmup_steps"],
                            label="Learning Rate Warmup Steps / 学习率预热步数"
                        )
                    with gr.Column():
                        # 调度器重启次数 - 使用滑块
                        lr_scheduler_num_cycles = gr.Slider(
                            minimum=1,
                            maximum=10,
                            step=1,
                            value=settings["lr_scheduler_num_cycles"],
                            label="Learning Rate Scheduler Num Cycles / 重启次数"
                        )
                
                # 第三行参数
                with gr.Row():
                    with gr.Column():
                        # 优化器类型
                        optimizer_type = gr.Dropdown(
                            choices=["adamw8bit", "adamw", "lion"],
                            value=settings["optimizer_type"],
                            label="Optimizer Type / 优化器类型"
                        )
                    with gr.Column():
                        # 混合精度
                        mixed_precision = gr.Dropdown(
                            choices=["fp16", "bf16"],
                            value=settings["mixed_precision"],
                            label="Mixed Precision / 混合精度"
                        )
                    with gr.Column():
                        # 梯度累积步数 - 使用滑块
                        gradient_accumulation_steps = gr.Slider(
                            minimum=1,
                            maximum=32,
                            step=1,
                            value=settings["gradient_accumulation_steps"],
                            label="Gradient Accumulation Steps / 梯度累积步数"
                        )
                
                # 高级训练参数
                gr.Markdown("### 高级训练参数")
                with gr.Row():
                    with gr.Column():
                        # 时间步采样
                        timestep_sampling = gr.Textbox(
                            value=settings["timestep_sampling"],
                            label="Timestep Sampling / 时间步采样"
                        )
                    with gr.Column():
                        # 离散流移位 - 使用滑块
                        discrete_flow_shift = gr.Slider(
                            minimum=0.0,
                            maximum=5.0,
                            step=0.1,
                            value=settings["discrete_flow_shift"],
                            label="Discrete Flow Shift / 离散流移位"
                        )
                    with gr.Column():
                        # 数据加载工作线程数 - 使用滑块
                        max_data_loader_n_workers = gr.Slider(
                            minimum=0,
                            maximum=16,
                            step=1,
                            value=settings["max_data_loader_n_workers"],
                            label="max_data_loader_n_workers / 数据加载工作线程数"
                        )
            
            # 显存优化Tab
            with gr.Tab("显存优化"):
                with gr.Row():
                    with gr.Column():
                        # 注意力实现
                        attention_implementation = gr.Dropdown(
                            choices=["sdpa", "xformers"],
                            value=settings["attention_implementation"],
                            label="Attention Implementation / 注意力实现"
                        )
                        # 低显存模式
                        enable_low_vram = gr.Checkbox(
                            value=settings["enable_low_vram"],
                            label="Enable Low VRAM Mode / 启用低显存模式"
                        )
                        # 交换块数 (低显存模式下显示)
                        blocks_to_swap = gr.Slider(
                            minimum=1,
                            maximum=32,
                            step=1,
                            value=settings["blocks_to_swap"],
                            label="Blocks to Swap / 交换块数",
                            visible=settings["enable_low_vram"]
                        )
                    with gr.Column():
                        # CPU线程数 - 使用滑块
                        num_cpu_threads_per_process = gr.Slider(
                            minimum=1,
                            maximum=32,
                            step=1,
                            value=settings["num_cpu_threads_per_process"],
                            label="num_cpu_threads_per_process / 每个进程的CPU线程数"
                        )
                        # 进程数 - 使用滑块
                        num_processes = gr.Slider(
                            minimum=1,
                            maximum=8,
                            step=1,
                            value=settings["num_processes"],
                            label="num_processes / 进程数"
                        )
                        # 从已有权重继续训练
                        use_network_weights = gr.Checkbox(
                            value=settings["use_network_weights"],
                            label="Continue Training From Existing Weights / 从已有权重继续训练"
                        )
            
            # 过程采样Tab
            with gr.Tab("过程采样"):
                # 训练期间生成示例
                generate_samples = gr.Checkbox(
                    value=settings["generate_samples"],
                    label="Generate Samples During Training / 训练期间生成示例"
                )
                
                # 采样设置区域
                with gr.Row(visible=settings["generate_samples"]) as sample_settings_row:
                    with gr.Column():
                        sample_at_first = gr.Checkbox(
                            value=settings["sample_at_first"],
                            label="Sample at first / 训练前生成示例"
                        )
                        # Sampling frequency — slider controls. Initial values
                        # come from the loaded settings dict.
                        sample_every_n_epochs = gr.Slider(
                            minimum=1,
                            maximum=50,
                            step=1,
                            value=settings["sample_every_n_epochs"],
                            label="Sample Every N Epochs / 每N个轮次采样一次"
                        )
                        sample_every_n_steps = gr.Slider(
                            minimum=0,
                            maximum=10000,
                            step=100,
                            value=settings["sample_every_n_steps"],
                            label="Sample Every N Steps / 每N步采样一次",
                        )
                    with gr.Column():
                        # Prompt text used when generating sample images.
                        sample_prompt_text = gr.Textbox(
                            value=settings["sample_prompt_text"],
                            label="Prompt Text / 提示词",
                            lines=2
                        )
                        # Sample-generation parameters — slider controls.
                        sample_w = gr.Slider(
                            minimum=256,
                            maximum=1024,
                            step=64,
                            value=settings["sample_w"],
                            label="Width (w) / 宽度"
                        )
                        sample_h = gr.Slider(
                            minimum=256,
                            maximum=1024,
                            step=64,
                            value=settings["sample_h"],
                            label="Height (h) / 高度"
                        )
                        # -1 presumably means "random seed" — TODO confirm
                        # against the sampling backend.
                        sample_seed = gr.Slider(
                            minimum=-1,
                            maximum=999999999,
                            step=1,
                            value=settings["sample_seed"],
                            label="Seed (d) / 种子"
                        )
                        sample_steps = gr.Slider(
                            minimum=1,
                            maximum=100,
                            step=1,
                            value=settings["sample_steps"],
                            label="Steps (s) / 步数"
                        )
            
            # Output-settings tab: model output naming, checkpoint save
            # frequency, logging backend, and misc options.
            with gr.Tab("输出设置"):
                with gr.Row():
                    # Base name for the saved model files.
                    output_name = gr.Textbox(
                        value=settings["output_name"],
                        label="Output Name / 输出名称"
                    )
                
                with gr.Row():
                    # Checkpoint save frequency — slider controls.
                    save_every_n_epochs = gr.Slider(
                        minimum=1,
                        maximum=50,
                        step=1,
                        value=settings["save_every_n_epochs"],
                        label="Save Every N Epochs / 每N个轮次保存一次"
                    )
                    save_every_n_steps = gr.Slider(
                        minimum=0,
                        maximum=10000,
                        step=100,
                        value=settings["save_every_n_steps"],
                        label="Save Every N Steps / 每N步保存一次"
                    )
                
                # Logging settings.
                gr.Markdown("### 日志设置")
                with gr.Row():
                    log_type = gr.Dropdown(
                        choices=["tensorboard", "wandb"],
                        value=settings["log_type"],
                        label="Log Type / 日志类型"
                    )
                    log_prefix = gr.Textbox(
                        value=settings["log_prefix"],
                        label="Log Prefix / 日志前缀"
                    )
                    log_dir = gr.Textbox(
                        value=settings["log_dir"],
                        label="Log Directory / 日志目录"
                    )
                
                # Miscellaneous settings.
                gr.Markdown("### 其他设置")
                with gr.Row():
                    # Free-form extra CLI parameters appended to the command.
                    custom_params = gr.Textbox(
                        value=settings["custom_params"],
                        label="Custom Parameter / 自定义参数"
                    )
                    # Shut the machine down automatically after training.
                    auto_shutdown_flag = gr.Checkbox(
                        value=settings["auto_shutdown"],
                        label="Auto Shutdown / 训练完成后自动关机"
                    )
            
            # Pre-cache tab: latent/text-encoder pre-caching controls and log.
            with gr.Tab("预缓存"):
                # Batch size — slider control.
                batch_size = gr.Slider(
                    minimum=1,
                    maximum=32,
                    step=1,
                    value=settings["batch_size"],
                    label="Batch size / 批量大小"
                )
                
                # Run / stop pre-caching buttons.
                with gr.Row():
                    pre_cache_btn = gr.Button("Run Pre-caching / 运行预缓存", variant="primary")
                    stop_pre_cache_btn = gr.Button("Stop Pre-caching / 停止预缓存", variant="secondary")
                
                # Read-only, auto-scrolling pre-cache log output.
                pre_cache_log = gr.Textbox(
                    label="预缓存日志",
                    lines=5,
                    interactive=False,
                    autoscroll=True
                )
            
            # Training-control tab: start/stop training and view the log.
            with gr.Tab("训练控制"):
                # Run / stop training buttons.
                with gr.Row():
                    train_btn = gr.Button("Run Training / 开始训练", variant="primary")
                    stop_train_btn = gr.Button("Stop Training / 停止训练", variant="secondary")
                
                # Read-only, auto-scrolling training log output.
                train_log = gr.Textbox(
                    label="训练日志",
                    lines=10,
                    interactive=False,
                    autoscroll=True
                )
        
        # Settings-update callback: collect, persist, and preview all settings.
        def update_settings():
            """Collect every UI component's value into a dict, save it to the
            settings TOML file, and return a TOML-formatted text preview.

            NOTE(review): this callback is registered without ``inputs=`` and
            reads each component's ``.value`` attribute. In Gradio, ``.value``
            is the component's construction-time default — live user input is
            only delivered through the ``inputs=`` argument of the event
            binding. Verify whether saving initial (not current) values is
            actually intended here.
            """
            # Gather the value of every UI element; numeric sliders are cast
            # to int/float so the saved TOML has the right types.
            updated_settings = {
                "train_type": train_type.value,
                "batch_size": int(batch_size.value),
                "max_train_epochs": int(max_train_epochs.value),
                "learning_rate": learning_rate.value,
                "lr_scheduler": lr_scheduler.value,
                "lr_warmup_steps": int(lr_warmup_steps.value),
                "lr_scheduler_num_cycles": int(lr_scheduler_num_cycles.value),
                "optimizer_type": optimizer_type.value,
                "network_dim": int(network_dim.value),
                "mixed_precision": mixed_precision.value,
                "gradient_accumulation_steps": int(gradient_accumulation_steps.value),
                "timestep_sampling": timestep_sampling.value,
                "discrete_flow_shift": float(discrete_flow_shift.value),
                "max_data_loader_n_workers": int(max_data_loader_n_workers.value),
                "attention_implementation": attention_implementation.value,
                "enable_low_vram": enable_low_vram.value,
                "blocks_to_swap": int(blocks_to_swap.value),
                "num_cpu_threads_per_process": int(num_cpu_threads_per_process.value),
                "num_processes": int(num_processes.value),
                "use_network_weights": use_network_weights.value,
                "output_dir": output_dir.value,
                "output_name": output_name.value,
                "save_every_n_epochs": int(save_every_n_epochs.value),
                "save_every_n_steps": int(save_every_n_steps.value),
                "generate_samples": generate_samples.value,
                "sample_at_first": sample_at_first.value,
                "sample_every_n_epochs": int(sample_every_n_epochs.value),
                "sample_every_n_steps": int(sample_every_n_steps.value),
                "sample_prompt_text": sample_prompt_text.value,
                "sample_w": int(sample_w.value),
                "sample_h": int(sample_h.value),
                "sample_seed": int(sample_seed.value),
                "sample_steps": int(sample_steps.value),
                "log_type": log_type.value,
                "log_prefix": log_prefix.value,
                "log_dir": log_dir.value,
                # No UI widget for this key — carried over from loaded settings.
                "log_tracker_name": settings.get("log_tracker_name", ""),
                "custom_params": custom_params.value,
                "auto_shutdown": auto_shutdown_flag.value,
                "fp8": fp8.value,
                # Keys below have no UI controls here; hard-coded defaults.
                "custom_prompt_txt": False,
                "custom_prompt_path": "",
                "offload_inactive_dit": False,
                # Model / data file paths.
                "dit_weights_path": dit_weights_path.value,
                "vae_path": vae_path.value,
                "text_encoder_model_path": text_encoder_model_path.value,
                "dataset_config": dataset_config.value,
                "network_weights_path": network_weights_path.value,
                "sample_image_path": sample_image_path.value
            }
            
            # Persist the settings to the TOML file on disk.
            save_settings(updated_settings)
            
            # Return the TOML text for the settings-preview box.
            return preview_settings(updated_settings)
        
        # File-picker button events: each opens a native dialog and writes the
        # selected path into the matching textbox.
        dit_weights_btn.click(
            fn=lambda: select_file([("Safetensors Files", "*.safetensors"), ("All Files", "*.*")]),
            outputs=dit_weights_path
        )
        
        vae_btn.click(
            fn=lambda: select_file([("Safetensors Files", "*.safetensors"), ("All Files", "*.*")]),
            outputs=vae_path
        )
        
        text_encoder_btn.click(
            fn=lambda: select_file([("Safetensors Files", "*.safetensors"), ("All Files", "*.*")]),
            outputs=text_encoder_model_path
        )
        
        dataset_config_btn.click(
            fn=lambda: select_file([("TOML Files", "*.toml"), ("All Files", "*.*")]),
            outputs=dataset_config
        )
        
        output_dir_btn.click(
            fn=select_folder,
            outputs=output_dir
        )
        
        network_weights_btn.click(
            fn=lambda: select_file([("Safetensors Files", "*.safetensors"), ("All Files", "*.*")]),
            outputs=network_weights_path
        )
        
        sample_image_btn.click(
            fn=lambda: select_file([("Image Files", "*.png *.jpg *.jpeg"), ("All Files", "*.*")]),
            outputs=sample_image_path
        )
        
        # Save / load buttons for the settings preview box.
        save_btn.click(
            fn=update_settings,
            outputs=settings_preview
        )
        
        load_btn.click(
            fn=lambda: preview_settings(load_settings()),
            outputs=settings_preview
        )
        
        # Auto-save: re-run update_settings whenever any input changes.
        # NOTE(review): update_settings is bound with no `inputs=`, so inside
        # it each component's `.value` is the construction-time default, not
        # the live UI value — confirm this auto-save actually captures edits.
        all_inputs = [
            train_type, batch_size, max_train_epochs, learning_rate, lr_scheduler,
            lr_warmup_steps, lr_scheduler_num_cycles, optimizer_type, network_dim, mixed_precision,
            gradient_accumulation_steps, timestep_sampling, discrete_flow_shift, max_data_loader_n_workers,
            attention_implementation, enable_low_vram, blocks_to_swap, num_cpu_threads_per_process, num_processes,
            use_network_weights, output_dir, output_name, save_every_n_epochs, save_every_n_steps,
            generate_samples, sample_at_first, sample_every_n_epochs, sample_every_n_steps, sample_prompt_text,
            sample_w, sample_h, sample_seed, sample_steps, log_type, log_prefix, log_dir, custom_params,
            auto_shutdown_flag, fp8, dit_weights_path, vae_path, text_encoder_model_path, dataset_config,
            network_weights_path, sample_image_path
        ]
        
        for input_component in all_inputs:
            input_component.change(
                fn=update_settings,
                outputs=settings_preview
            )
        
        # Pre-cache / training buttons. Settings are re-read from disk on
        # each click; "" is passed as the initial log-box content.
        pre_cache_btn.click(
            fn=lambda: start_pre_caching(load_settings(), ""),
            outputs=pre_cache_log
        )
        
        stop_pre_cache_btn.click(
            fn=lambda: stop_caching(""),
            outputs=pre_cache_log
        )
        
        train_btn.click(
            fn=lambda: run_wan_training(load_settings(), ""),
            outputs=train_log
        )
        
        stop_train_btn.click(
            fn=lambda: stop_train(""),
            outputs=train_log
        )
        
        # Conditional visibility: show dependent controls only while their
        # toggle checkbox is checked.
        enable_low_vram.change(
            fn=lambda x: gr.update(visible=x),
            inputs=enable_low_vram,
            outputs=blocks_to_swap
        )
        
        use_network_weights.change(
            fn=lambda x: gr.update(visible=x),
            inputs=use_network_weights,
            outputs=network_weights_row
        )
        
        generate_samples.change(
            fn=lambda x: gr.update(visible=x),
            inputs=generate_samples,
            outputs=sample_settings_row
        )
        
        generate_samples.change(
            fn=lambda x: gr.update(visible=x),
            inputs=generate_samples,
            outputs=sample_image_row
        )
    
    return demo

# Compatibility shim kept at the end of the file so main_gradio.py can use
# the same entry point as the original main.py.
def draw_ui():
    """Build the Gradio UI and return it (main_gradio.py-compatible API)."""
    ui = create_ui()
    return ui