mod config;
mod kvcache;
mod model;
mod npu_matmul;
mod operators;
mod params;
mod tensor;

use std::io::Write;

use safetensors::SafeTensors;
use tokenizers::Tokenizer;

use crate::config::LlamaConfigJson;
use model::Llama;

// Tokenizer definition embedded into the binary at compile time, so the
// executable does not depend on the tokenizer file being present at runtime.
const TOKENIZER: &[u8] = include_bytes!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/models/stories110M/tokenizer.json"
));

// Model configuration JSON, also embedded at compile time and parsed into
// `LlamaConfigJson` in `main`.
const CONFIG_STR: &str = include_str!(concat!(
    env!("CARGO_MANIFEST_DIR"),
    "/models/stories110M/config.json"
));

// NOTE(review): embedding the full weights was abandoned — presumably because
// model.safetensors is too large to bake into the binary; the weights are
// read from disk in `main` instead. Consider deleting this dead block.
// const MODEL_DATA: &[u8] = include_bytes!(concat!(
//     env!("CARGO_MANIFEST_DIR"),
//     "/models/stories110M/model.safetensors"
// ));

fn main() {
    let config: LlamaConfigJson = serde_json::from_str(CONFIG_STR).unwrap();
    let model_file = std::fs::read("model.safetensors").unwrap();
    let safetensor = SafeTensors::deserialize(&model_file).unwrap();
    let llama = Llama::<f32>::from_safetensors(config, safetensor);
    let tokenizer = Tokenizer::from_bytes(TOKENIZER).unwrap();
    let input = "That was a beautiful small town.";
    print!("{}", input);
    let binding = tokenizer.encode(input, true).unwrap();
    let input_ids = binding.get_ids();
    let mut cache = llama.new_cache();
    llama.generate_token(&mut cache, input_ids, 500, 0.9, 30, 1.0, &mut |id| {
        tokenizer.decode(&[id], true).unwrap()
    });
}
