import subprocess
from monitor_tools.device_monitor import gpu_monitor

@gpu_monitor(interval=0.1, filename='log/llama_gpu_usage.log')
def main():
    """Run the llama.cpp binary while the decorator samples GPU usage to a log.

    Raises:
        SystemExit: with the subprocess's exit code if the binary fails.
    """
    # Path to your llama.cpp executable; adjust for your installation.
    llama_command = "/home/kylin/gjl/project/llama.cpp/build/bin/llama-simple"

    # List argv (shell=False) avoids shell-injection/quoting issues;
    # capture both streams as text so they can be reported below.
    result = subprocess.run([llama_command], capture_output=True, text=True)

    print(result.stdout)

    # Don't silently discard failures: surface stderr and propagate the
    # subprocess's exit status instead of pretending everything succeeded.
    if result.returncode != 0:
        print(result.stderr)
        raise SystemExit(result.returncode)


# Guard the entry point so importing this module does not launch the binary.
if __name__ == "__main__":
    main()