use std::process::{Command, Stdio};

use crate::process::{save_process_info, ProcessInfo};

pub fn run_llama_model(model_path: &str) -> Result<(), RunError> {
    let mut cmd = Command::new("llama.cpp")
        .arg("--model")
        .arg(model_path)
        .stdout(Stdio::inherit())
        .stderr(Stdio::inherit())
        .spawn()?;

    let process_info = ProcessInfo {
        pid: cmd.id() as i32,
        model: model_path.into(),
        start_time: std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap()
            .as_secs(),
        command: format!("llama.cpp --model {}", model_path),
    };

    save_process_info(&process_info)?;
    cmd.wait()?;
    Ok(())
}

/// Errors produced by [`run_llama_model`].
#[derive(Debug, thiserror::Error)]
pub enum RunError {
    /// Spawning or waiting on the llama.cpp child process failed.
    #[error("Process execution failed: {0}")]
    Io(#[from] std::io::Error),
    /// Persisting the child's tracking metadata failed.
    ///
    /// Intentionally no `#[from]` here: a second `#[from] std::io::Error`
    /// variant would generate a conflicting `From<io::Error>` impl and fail
    /// to compile. Construct this variant explicitly, e.g. via
    /// `.map_err(RunError::ProcessTracking)`.
    #[error("Process tracking failed: {0}")]
    ProcessTracking(std::io::Error),
}