{
  "architectures": [
    "LlamaForCausalLM"
  ],
| "attention_probs_dropout_prob": 0.1, |
| "hidden_act": "gelu", |
| "hidden_dropout_prob": 0.1, |
| "hidden_size": 768, |
| "initializer_range": 0.02, |
| "intermediate_size": 3072, |
| "layer_norm_epsilon": 1e-05, |
| "max_position_embeddings": 1024, |
| "model_type": "llama", |
| "num_attention_heads": 12, |
| "num_hidden_layers": 12, |
| "pad_token_id": 0, |
| "type_vocab_size": 2, |
| "vocab_size": 30522, |
| "adapter_config": { |
| "adapter_type": "lora", |
| "quantization": { |
| "bits": 4 |
| } |
| }, |
| "quantization_config": { |
| "bits": 4 |
| }, |
| "temperature": 0.04, |
| "max_new_tokens": 100, |
| "trainer": { |
| "type": "finetune", |
| "epochs": 6, |
| "batch_size": 1, |
| "eval_batch_size": 2, |
| "gradient_accumulation_steps": 16, |
| "learning_rate": 0.001, |
| "learning_rate_scheduler": { |
| "decay": "cosine" |
| } |
| }, |
| "text_defaults": { |
| "preprocessing": { |
| "max_sequence_length": 256 |
| } |
| } |
| } |
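
Taken together, `adapter_config` and `quantization_config` describe a 4-bit, LoRA-based fine-tuning setup. The sketch below shows one way those two settings could map onto the Hugging Face `transformers`/`peft` stack; the checkpoint name and the LoRA hyperparameters (`r`, `lora_alpha`, `target_modules`) are illustrative assumptions, since the config above does not specify them.

```python
# Minimal sketch: load a Llama checkpoint in 4-bit and attach a LoRA adapter,
# mirroring "quantization_config": {"bits": 4} and "adapter_type": "lora".
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import LoraConfig, get_peft_model

MODEL_NAME = "meta-llama/Llama-2-7b-hf"  # assumption: any Llama checkpoint works here

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,                    # "bits": 4
    bnb_4bit_compute_dtype=torch.float16,
)

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    quantization_config=bnb_config,
    device_map="auto",
)

lora_config = LoraConfig(
    r=8,                                  # assumed rank; not given in the config above
    lora_alpha=16,                        # assumed scaling factor
    target_modules=["q_proj", "v_proj"],  # assumed attention projections
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_config)
model.print_trainable_parameters()
```

The top-level `temperature` and `max_new_tokens` entries then apply at inference time, e.g. `model.generate(**inputs, do_sample=True, temperature=0.04, max_new_tokens=100)`; a temperature as low as 0.04 makes sampling nearly greedy.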
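
The `trainer` block maps almost field-for-field onto `transformers.TrainingArguments`, assuming the Hugging Face `Trainer` is the training loop in use (the config itself does not say). The output directory below is a placeholder; everything else copies the values above.

```python
# Sketch: the "trainer" settings expressed as Hugging Face TrainingArguments.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./finetune-out",     # placeholder path, not from the config
    num_train_epochs=6,              # "epochs": 6
    per_device_train_batch_size=1,   # "batch_size": 1
    per_device_eval_batch_size=2,    # "eval_batch_size": 2
    gradient_accumulation_steps=16,  # "gradient_accumulation_steps": 16
    learning_rate=1e-3,              # "learning_rate": 0.001
    lr_scheduler_type="cosine",      # "learning_rate_scheduler": {"decay": "cosine"}
)
```

With `batch_size: 1` and 16 gradient-accumulation steps, the effective batch size is 16, which is what makes such a small per-device batch workable for a quantized model. The `max_sequence_length` of 256 under `text_defaults.preprocessing` would correspond to tokenizer-side truncation, e.g. `tokenizer(text, truncation=True, max_length=256)`.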