import torch
import os
import subprocess
from datetime import datetime

# Path configuration for the COLMAP output and the training results.
data_dir = r"D:\train\south-building\output"
output_dir = r"D:\train\south-building\results"

# Tag each run with a timestamp so experiments never overwrite each other.
timestamp = datetime.now().strftime("%Y-%m-%d_%H%M%S")
experiment_name = "nerfacto_" + timestamp

# Make sure the results directory exists before training starts.
os.makedirs(output_dir, exist_ok=True)

# Build the ns-train invocation (the viewer/visualization flags are kept).
train_command = ["ns-train", "nerfacto"]
train_command += ["--data", data_dir]
train_command += ["--output-dir", output_dir]
train_command += ["--experiment-name", experiment_name]
train_command += ["--vis", "viewer"]  # force the interactive viewer on
train_command += ["--viewer.websocket-port", "7007"]  # pin the viewer port
train_command += ["--max-num-iterations", "50000"]
train_command += ["--pipeline.model.predict-normals", "True"]
train_command += ["--pipeline.model.camera-optimizer.mode", "off"]

# GPU memory tuning: shrink the ray batches on cards with < 12 GB of VRAM.
if torch.cuda.is_available():
    gpu_info = subprocess.check_output(
        ["nvidia-smi", "--query-gpu=memory.total", "--format=csv,nounits,noheader"]
    )
    # nvidia-smi prints one line per installed GPU, so int() on the whole
    # output crashes on multi-GPU machines. Parse only the first device —
    # the one training uses by default. Values are MiB (nounits).
    total_memory = int(gpu_info.decode().strip().splitlines()[0])
    if total_memory < 12000:
        train_command.extend([
            "--pipeline.model.near-plane", "0.05",
            "--pipeline.model.far-plane", "100.0",
            "--pipeline.datamanager.train-num-rays-per-batch", "4096",
            "--pipeline.datamanager.eval-num-rays-per-batch", "4096",
        ])

# Launch training. stdout/stderr are deliberately NOT captured so the live
# progress (iteration count, loss values) and the viewer URL stream straight
# to this console.
try:
    print(f"开始训练: {experiment_name}")
    print(f"数据目录: {data_dir}")
    print(f"结果保存至: {os.path.join(output_dir, experiment_name)}")
    print("正在启动训练，请等待可视化链接...\n")

    # BUGFIX: the original passed a list argv together with shell=True. With
    # shell=True on POSIX only the first list element is executed, and on
    # Windows the shell layer is unnecessary and fragile. Passing the list
    # directly (shell=False, the default) is correct on both platforms.
    # Output is inherited from the parent process (the default), so the
    # training log appears live on the console.
    subprocess.run(train_command, check=True)

    print("\n训练完成! 可视化界面仍可访问：http://localhost:7007")

except subprocess.CalledProcessError as e:
    # ns-train exited with a non-zero status; surface the code and re-raise.
    print(f"\n训练失败，返回码: {e.returncode}")
    raise
except Exception as e:
    # Anything else (e.g. ns-train not on PATH): report and propagate.
    print(f"\n发生错误: {str(e)}")
    raise