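# H2O LLM Studio experiment configuration (cfg.yaml) for the "masked-fat-mamba" run:
# causal-LM text classification (predicting the essay "score" column, 6 classes)
# on a Llama-2-7B backbone.
# The backbone is kept in float32 with gradient checkpointing enabled, and training
# starts from the checkpoint of the earlier "masked-mamba" experiment.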
architecture:
    backbone_dtype: float32
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: /root/h2o-llmstudio/output/user/masked-mamba/checkpoint.pth
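# Augmentation: 10% of input tokens are randomly masked during training
# (token_mask_probability); NEFTune noise and parent-chaining augmentations are disabled.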
augmentation:
    neftune_noise_alpha: 0.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.1
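# Dataset: "full_text" is the prompt column and "score" the label column of essay_train.csv.
# With validation_strategy: automatic, a 1% holdout (validation_size) is split off the
# training data, which is why train_dataframe and validation_dataframe point to the same file.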
dataset:
    add_eos_token_to_answer: false
    add_eos_token_to_prompt: false
    add_eos_token_to_system: false
    add_prompt_answer_tokens: false
    answer_column: score
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    num_classes: 6
    parent_id_column: None
    personalize: false
    prompt_column:
    - full_text
    system_column: None
    text_answer_separator: ''
    text_prompt_start: ''
    text_system_start: ''
    train_dataframe: /root/h2o-llmstudio/data/user/essay_train/essay_train.csv
    validation_dataframe: /root/h2o-llmstudio/data/user/essay_train/essay_train.csv
    validation_size: 0.01
    validation_strategy: automatic
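# Environment: single GPU ('0'), bfloat16 mixed precision, 8 dataloader workers.
# The DeepSpeed settings below are inert because use_deepspeed is false;
# seed: -1 draws a fresh random seed for each run.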
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    huggingface_branch: main
    mixed_precision: true
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: -1
    trust_remote_code: true
    use_deepspeed: false
experiment_name: masked-fat-mamba
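# Backbone: h2oai/h2ogpt-4096-llama2-7b (Llama-2-7B with a 4096-token context window).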
llm_backbone: h2oai/h2ogpt-4096-llama2-7b
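# Logging: metrics are streamed to the Neptune project samvelkoch/essay.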
logging:
    logger: Neptune
    neptune_project: samvelkoch/essay
output_directory: /root/h2o-llmstudio/output/user/masked-fat-mamba/
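# Prediction: batch_size_inference of 0 falls back to the training batch size;
# validation is scored with Accuracy.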
prediction:
    batch_size_inference: 0
    metric: Accuracy
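# Problem type: a classification head on top of the causal LM backbone
# (text_causal_classification_modeling).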
problem_type: text_causal_classification_modeling
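# Tokenizer: sequences up to 10,240 tokens; the fast tokenizer is enabled via tokenizer_kwargs.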
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 10240
    padding_quantile: 1.0
    tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
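# Training: one epoch, batch size 2, AdamW with a cosine schedule and CrossEntropyLoss.
# LoRA adapters (r=4, alpha=16, dropout 0.05) are trained on the framework-default target
# modules (lora_target_modules left empty); the classification_head uses the differential
# learning rate of 1e-5 while the adapters train at 1e-4. Flash Attention 2 is enabled.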
training:
    batch_size: 2
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers:
    - classification_head
    drop_last_batch: true
    epochs: 1
    evaluate_before_training: false
    evaluation_epochs: 1.0
    freeze_layers: []
    grad_accumulation: 1
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 16
    lora_dropout: 0.05
    lora_r: 4
    lora_target_modules: ''
    lora_unfreeze_layers: []
    loss_function: CrossEntropyLoss
    optimizer: AdamW
    save_checkpoint: last
    schedule: Cosine
    train_validation_data: false
    use_dora: false
    use_flash_attention_2: true
    warmup_epochs: 0.0
    weight_decay: 1.0e-05