llama-68m-GPTQ-sharded / model.safetensors.index.json
{
"metadata": {
"total_size": 108218880
},
"weight_map": {
"lm_head.weight": "model-00003-of-00003.safetensors",
"model.embed_tokens.weight": "model-00001-of-00003.safetensors",
"model.layers.0.input_layernorm.weight": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.down_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.down_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.down_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.down_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.down_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.gate_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.gate_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.gate_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.gate_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.gate_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.up_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.up_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.up_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.up_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.mlp.up_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.k_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.k_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.k_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.k_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.o_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.o_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.o_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.o_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.o_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.q_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.q_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.q_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.q_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.v_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.v_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.v_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.0.self_attn.v_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.input_layernorm.weight": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.down_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.down_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.down_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.down_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.down_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.gate_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.gate_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.gate_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.gate_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.gate_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.up_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.up_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.up_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.up_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.mlp.up_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.post_attention_layernorm.weight": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.k_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.k_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.k_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.k_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.k_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.o_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.o_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.o_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.o_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.o_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.q_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.q_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.q_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.q_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.q_proj.scales": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.v_proj.bias": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.v_proj.g_idx": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.v_proj.qweight": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.v_proj.qzeros": "model-00002-of-00003.safetensors",
"model.layers.1.self_attn.v_proj.scales": "model-00002-of-00003.safetensors",
"model.norm.weight": "model-00002-of-00003.safetensors"
}
}
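
As a usage note, an index file like this maps each tensor name to the shard that stores it, so a loader only has to open each `.safetensors` file once. Below is a minimal sketch of how such an index might be consumed directly with the `safetensors` Python library, assuming the three shard files have been downloaded next to the index; the path constant is illustrative, not part of the repo.

```python
import json
from collections import defaultdict

from safetensors import safe_open  # pip install safetensors

# Hypothetical local path; adjust to wherever the repo files were downloaded.
INDEX_PATH = "model.safetensors.index.json"

with open(INDEX_PATH) as f:
    index = json.load(f)

# Group tensor names by shard so each shard file is opened exactly once.
shards = defaultdict(list)
for tensor_name, shard_file in index["weight_map"].items():
    shards[shard_file].append(tensor_name)

state_dict = {}
for shard_file, tensor_names in shards.items():
    with safe_open(shard_file, framework="pt") as shard:
        for name in tensor_names:
            state_dict[name] = shard.get_tensor(name)

# Loose sanity check: the loaded bytes should line up with
# metadata["total_size"] (108218880 here).
loaded_bytes = sum(t.numel() * t.element_size() for t in state_dict.values())
print(loaded_bytes, "vs", index["metadata"]["total_size"])
```

Note that the GPTQ-specific tensors (`qweight`, `qzeros`, `scales`, `g_idx`) load as plain integer/float tensors with this approach; dequantizing them into usable weights is left to a GPTQ-aware runtime such as AutoGPTQ.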