Quantization
Has anyone successfully quantized the Gemma models? I tried quantizing them with llama.cpp so they would run on my laptop, but I can't get the quantized versions to load.
In LM Studio, I received the following error:
"llama.cpp error: 'create_tensor: tensor 'output.weight' not found'"
and an AssertionError in oobabooga/text-generation-webui (with the model loader set to 'llama.cpp').
Edit: Okay, I realized the obvious reason: their current builds simply don't support this model yet.
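For reference, the flow I was attempting is the usual llama.cpp one, roughly as follows (a minimal sketch; the paths are placeholders and assume a local llama.cpp checkout):

```bash
# Usual llama.cpp quantization flow (paths are placeholders).
# 1. Convert the original HF checkpoint to a full-precision GGUF.
python convert-hf-to-gguf.py /path/to/gemma-7b-it \
    --outfile gemma-7b-it.f16.gguf --outtype f16

# 2. Quantize it down to a smaller type, e.g. Q2_K.
./quantize gemma-7b-it.f16.gguf gemma-7b-it.Q2_K.gguf Q2_K
```

A converter that predates Gemma support won't produce a working file here, which would match the above.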
I have the same problem. Is there a workaround?
I am encountering the same problem while trying to load the 7B Q2_K model. For context, I am on a laptop with 8 GB of RAM.
Here is the complete error log:
```json
{
"cause": "llama.cpp error: 'create_tensor: tensor 'output.weight' not found'",
"errorData": {
"n_ctx": 2048,
"n_batch": 512,
"n_gpu_layers": 10
},
"data": {
"memory": {
"ram_capacity": "7.50 GB",
"ram_unused": "7.50 GB"
},
"gpu": {
"type": "IntelOpenCL",
"vram_recommended_capacity": 0,
"vram_unused": 0
},
"os": {
"platform": "linux",
"version": "6.7.5-arch1-1",
"supports_avx2": true
},
"app": {
"version": "0.2.15",
"downloadsDir": "/home/user/.cache/lm-studio/models"
},
"model": {}
},
"title": "Failed to load model",
"systemDiagnostics": {
"memory": {
"ram_capacity": 8058425344,
"ram_unused": 8058425344
},
"gpu": {
"type": "IntelOpenCL",
"vram_recommended_capacity": 0,
"vram_unused": 0
},
"os": {
"platform": "linux",
"version": "6.7.5-arch1-1",
"supports_avx2": true
},
"app": {
"version": "0.2.15",
"downloadsDir": "/home/user/.cache/lm-studio/models"
},
"model": {
"gguf_preview": {
"name": ".",
"arch": "llama",
"quant": "Q2_K",
"context_length": 8192,
"embedding_length": 3072,
"num_layers": 28,
"rope": {
"freq_base": 10000,
"dimension_count": 192
},
"head_count": 16,
"head_count_kv": 16
},
"filesize": 3094375360,
"config": {
"path": "/home/user/.cache/lm-studio/models/mlabonne/gemma-7b-it-GGUF/gemma-7b-it.Q2_K.gguf",
"loadConfig": {
"n_ctx": 2048,
"n_batch": 512,
"rope_freq_base": 0,
"rope_freq_scale": 0,
"n_gpu_layers": 10,
"use_mlock": true,
"main_gpu": 0,
"tensor_split": [
0
],
"seed": -1,
"f16_kv": true,
"use_mmap": true,
"no_kv_offload": false,
"num_experts_used": 0
}
}
}
}
}
```
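One thing that stands out in the log: gguf_preview reports "arch": "llama" rather than "gemma", which suggests the file was converted before llama.cpp had Gemma support. Gemma ties its output projection to the token embeddings, so a file loaded as a llama-architecture model has no output.weight tensor for the loader to find, which would explain the error. You can check a file's declared architecture and tensor names with the gguf-dump.py script from llama.cpp's gguf-py directory (a sketch; the script path may differ in your checkout):

```bash
# Dump GGUF metadata and tensor names, then look at the declared
# architecture and any output tensors. The script path is an assumption;
# check gguf-py/scripts/ in your llama.cpp checkout.
python gguf-py/scripts/gguf-dump.py \
    ~/.cache/lm-studio/models/mlabonne/gemma-7b-it-GGUF/gemma-7b-it.Q2_K.gguf \
    | grep -iE "architecture|output"
```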
Same problem
"llama.cpp error: 'create_tensor: tensor 'output.weight' not found'"
```json
{
"memory": {
"ram_capacity": "31.36 GB",
"ram_unused": "22.47 GB"
},
"gpu": {
"type": "AmdOpenCL",
"vram_recommended_capacity": "6.00 GB",
"vram_unused": "5.01 GB"
},
"os": {
"platform": "win32",
"version": "10.0.22621",
"supports_avx2": true
},
"app": {
"version": "0.2.16"
},
"model": {}
}
```
@leokster
@paradoxnafi
@donbalear
Did you use this script to convert the model files to GGUF?
https://github.com/ggerganov/llama.cpp/pull/5647
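If not, it may be worth reconverting from the original (non-GGUF) weights with a checkout that includes that change, roughly like this (a sketch assuming the usual llama.cpp layout; paths are placeholders):

```bash
# Update and rebuild llama.cpp so the converter and loader know about Gemma,
# then redo the conversion from the original weights. Paths are placeholders.
cd llama.cpp && git pull && make -j
python convert-hf-to-gguf.py /path/to/gemma-7b-it \
    --outfile gemma-7b-it.f16.gguf --outtype f16
```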