use std::path::Path;
use crate::run;

pub async fn warmup_model(model_path: &Path) -> Result<(), WarmupError> {
    // 初始化推理上下文
    let mut ctx = run::init_model_context(model_path)?;
    
    // 运行空推理
    let dummy_input = vec![0; 128];
    run::inference(&mut ctx, &dummy_input)?;
    
    // 预热GPU（如果可用）
    #[cfg(feature = "cuda")]
    run::warmup_gpu()?;

    Ok(())
}

/// Errors that can occur while warming up a model.
#[derive(Debug, thiserror::Error)]
pub enum WarmupError {
    /// A `run::RunError` propagated via `#[from]` (e.g. context
    /// initialization failure).
    #[error("Initialization failed: {0}")]
    Init(#[from] run::RunError),
    /// An inference failure, carried as a human-readable message.
    #[error("Inference error: {0}")]
    Inference(String),
}