llama-adapter-7b / adapter_config.json
{
"adapter_layers": 30,
"adapter_len": 10,
"base_model_name_or_path": "decapoda-research/llama-7b-hf",
"inference_mode": true,
"peft_type": "ADAPTION_PROMPT",
"target_modules": "self_attn",
"task_type": "CAUSAL_LM"
}
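
For reference, a minimal sketch of how a config like this is typically consumed with the PEFT library. The adapter repo id `winglian/llama-adapter-7b` is inferred from the page title and is an assumption; substitute your own path or repo id, and note that the referenced base checkpoint is an older community conversion that may need the `LlamaTokenizer` class loaded explicitly.

```python
# Sketch: attaching this ADAPTION_PROMPT adapter to its base model with PEFT.
# Assumptions: adapter repo id "winglian/llama-adapter-7b" (taken from the
# page title), float16 weights, and a CUDA-capable or CPU environment.
import torch
from transformers import AutoModelForCausalLM, LlamaTokenizer
from peft import PeftModel

base_id = "decapoda-research/llama-7b-hf"  # base_model_name_or_path above
adapter_id = "winglian/llama-adapter-7b"   # assumed adapter repo id

tokenizer = LlamaTokenizer.from_pretrained(base_id)
base_model = AutoModelForCausalLM.from_pretrained(
    base_id, torch_dtype=torch.float16
)

# PeftModel reads adapter_config.json: with peft_type ADAPTION_PROMPT it
# prepends adapter_len (10) learned prompt tokens to the self_attn modules
# of the top adapter_layers (30) transformer blocks, using zero-initialized
# gating so the frozen base model is unchanged before the gates open.
model = PeftModel.from_pretrained(base_model, adapter_id)
model.eval()  # inference_mode: true, so no adapter training state is needed
```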