{
    "model": "Llama",
    "quantization": "F16",
    "quantization_version": "Not_Quantized",
    "container": "GGML",
    "converter": "llm-rs",
    "hash": "927ec01b298916bafe7b9cbeedd4563bad016f11b2365621042322e894d6f493",
    "base_model": "openlm-research/open_llama_3b"
}
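A minimal sketch of how this metadata could be used to verify a downloaded model file, assuming the "hash" field is a SHA-256 hex digest of the GGML model binary; the file names ("open_llama_3b.json", "open_llama_3b-f16.bin") are placeholders and should be adjusted to the actual files in the repository.

import hashlib
import json

# Load the sidecar metadata file (placeholder name; adjust to your local copy).
with open("open_llama_3b.json") as f:
    meta = json.load(f)

# Recompute the SHA-256 digest of the model binary in 1 MiB chunks and compare
# it to the "hash" field, assumed here to be a SHA-256 hex digest of that file.
digest = hashlib.sha256()
with open("open_llama_3b-f16.bin", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

if digest.hexdigest() == meta["hash"]:
    print("model file matches metadata hash")
else:
    print("hash mismatch: file may be corrupted or from a different conversion")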