{ "config": { "alpha": 16, "architecture": "lora", "attn_matrices": [ "q", "k", "v" ], "composition_mode": "add", "dropout": 0.1, "init_weights": "lora", "intermediate_lora": true, "leave_out": [], "output_lora": true, "r": 64, "selfattn_lora": true, "use_gating": false }, "hidden_size": 5120, "model_class": "LlamaForCausalLM", "model_name": "meta-llama/Llama-2-13b-hf", "model_type": "llama", "name": "assistant_adapter", "version": "0.1.2" }