Aaryan-Nakhat committed commit 9011b31 (parent: ec0f055)

Upload cfg.yaml

Files changed (1): cfg.yaml (added, +109 lines)

cfg.yaml:
architecture:
    backbone_dtype: bfloat16
    force_embedding_gradients: false
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
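The architecture block keeps the full pretrained backbone in bfloat16 and trades compute for memory via gradient checkpointing. As a rough sketch of what these flags correspond to in plain transformers code (illustrative only, not H2O LLM Studio's actual loader):

import torch
from transformers import AutoModelForCausalLM

# backbone_dtype: bfloat16 -> load the weights in bf16
model = AutoModelForCausalLM.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct",
    torch_dtype=torch.bfloat16,
)
# gradient_checkpointing: true -> recompute activations during backward
model.gradient_checkpointing_enable()
# force_embedding_gradients: false -> no gradients forced through the embedding layer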
augmentation:
    neftune_noise_alpha: 5.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
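The only active augmentation is NEFTune with alpha = 5.0: uniform noise is added to the input embeddings during training, scaled by alpha / sqrt(seq_len * hidden_dim) as in the NEFTune paper. A minimal sketch of the idea (hypothetical helper, not the Studio's implementation):

import torch

def neftune_noise(embeds: torch.Tensor, alpha: float = 5.0) -> torch.Tensor:
    # embeds: (batch, seq_len, hidden_dim) input embeddings; apply in training only
    seq_len, hidden_dim = embeds.size(1), embeds.size(2)
    mag = alpha / (seq_len * hidden_dim) ** 0.5
    # uniform noise in [-mag, +mag], per the NEFTune scaling rule
    return embeds + torch.empty_like(embeds).uniform_(-mag, mag)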
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: ground_truth
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: None
    personalize: false
    prompt_column:
    - prompt
    system_column: None
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    text_system_start: <|system|>
    train_dataframe: /home/aaryan/llm_studio_experiments/experiment_45/h2o-llmstudio/data/user/data_for_LLM_Studio_exp_45_intelligent_layer_2_plus_exp_39_data_finetuning/train_data_for_LLM_Studio_exp_45_intelligent_layer_2_plus_exp_39_data_finetuning.csv
    validation_dataframe: /home/aaryan/llm_studio_experiments/experiment_45/h2o-llmstudio/data/user/data_for_LLM_Studio_exp_45_intelligent_layer_2_plus_exp_39_data_finetuning/validation_data_for_LLM_Studio_exp_45_intelligent_layer_2_plus_exp_39_data_finetuning.csv
    validation_size: 0.01
    validation_strategy: custom
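Prompts come from the prompt column, targets from ground_truth, and each sample is rendered with the <|prompt|> / <|answer|> separators plus an EOS token after both parts; with mask_prompt_labels: true the loss is computed on answer tokens only. A hedged sketch of the resulting sample construction (build_sample is a hypothetical helper; the exact EOS string depends on the tokenizer, and the <|eot_id|> shown here is an assumption based on Llama-3-Instruct):

IGNORE_INDEX = -100  # ignored by PyTorch cross-entropy

def build_sample(prompt: str, answer: str, tokenizer, eos: str = "<|eot_id|>"):
    # text_prompt_start / text_answer_separator from the dataset config
    prompt_part = f"<|prompt|>{prompt}{eos}"
    answer_part = f"<|answer|>{answer}{eos}"
    prompt_ids = tokenizer(prompt_part, add_special_tokens=False).input_ids
    answer_ids = tokenizer(answer_part, add_special_tokens=False).input_ids
    input_ids = prompt_ids + answer_ids
    # mask_prompt_labels: true -> no loss on the prompt tokens
    labels = [IGNORE_INDEX] * len(prompt_ids) + answer_ids
    return input_ids, labels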
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    huggingface_branch: main
    mixed_precision: false
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: 77
    trust_remote_code: true
    use_deepspeed: false
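Training runs data-parallel on GPUs 0 and 1; the deepspeed_* keys are present but inert, since use_deepspeed is false. Dataloading uses 8 workers, and all RNGs are pinned to seed 77. A typical sketch of that seeding step (seed_everything is a hypothetical helper, not Studio code):

import random

import numpy as np
import torch

def seed_everything(seed: int = 77) -> None:
    # seed: 77 from the environment section
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)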
experiment_name: experiment_45_intelligent_layer_2_plus_exp_39_data
llm_backbone: meta-llama/Meta-Llama-3-8B-Instruct
logging:
    logger: None
    neptune_project: ''
output_directory: /home/aaryan/llm_studio_experiments/experiment_45/h2o-llmstudio/output/user/experiment_45_intelligent_layer_2_plus_exp_39_data/
prediction:
    batch_size_inference: 2
    do_sample: true
    max_length_inference: 192
    max_time: 0.0
    metric: BLEU
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: general
    min_length_inference: 1
    num_beams: 1
    num_history: 4
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.3
    top_k: 50
    top_p: 0.95
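Validation generations are sampled (do_sample: true) at a fairly low temperature with top-k and nucleus filtering, capped at 192 tokens, and scored with BLEU; the metric_gpt_* keys only apply when the GPT-based metric is selected. These settings map roughly one-to-one onto transformers' generate(); a sketch, assuming model is loaded and inputs is a tokenized batch:

outputs = model.generate(
    **inputs,
    min_new_tokens=1,        # min_length_inference
    max_new_tokens=192,      # max_length_inference
    do_sample=True,
    temperature=0.3,
    top_k=50,
    top_p=0.95,
    repetition_penalty=1.2,
    num_beams=1,             # sampling, not beam search
)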
problem_type: text_causal_language_modeling
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 2592
    max_length_answer: 192
    max_length_prompt: 2400
    padding_quantile: 1.0
    use_fast: true
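The token budget is internally consistent: 2400 prompt tokens plus 192 answer tokens equals the overall max_length of 2592. An illustrative sketch of that two-sided truncation (placeholder strings; not the Studio's tokenization code):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained(
    "meta-llama/Meta-Llama-3-8B-Instruct", use_fast=True
)
prompt_text, answer_text = "...", "..."  # placeholders
# max_length_prompt: 2400 and max_length_answer: 192 sum to max_length: 2592
prompt_ids = tok(prompt_text, truncation=True, max_length=2400).input_ids
answer_ids = tok(answer_text, truncation=True, max_length=192).input_ids
assert len(prompt_ids) + len(answer_ids) <= 2592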
training:
    batch_size: 4
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 2
    evaluate_before_training: false
    evaluation_epochs: 1.0
    grad_accumulation: 2
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 32
    lora_dropout: 0.05
    lora_r: 16
    lora_target_modules: q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj
    loss_function: TokenAveragedCrossEntropy
    optimizer: AdamW
    save_checkpoint: best
    schedule: Cosine
    train_validation_data: false
    use_flash_attention_2: true
    warmup_epochs: 0.05
    weight_decay: 0.0
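Training is LoRA fine-tuning (r = 16, alpha = 32, dropout 0.05) over all attention and MLP projections, with AdamW at 1e-4, a cosine schedule, token-averaged cross-entropy, and flash attention 2. With batch_size 4, grad_accumulation 2, and two GPUs, the effective batch size is 16. The adapter setup corresponds to a standard peft configuration; a sketch, assuming model is the bf16 backbone from the earlier architecture sketch:

from peft import LoraConfig, get_peft_model

lora_cfg = LoraConfig(
    r=16,                # lora_r
    lora_alpha=32,
    lora_dropout=0.05,
    target_modules=[     # lora_target_modules from the config
        "q_proj", "k_proj", "v_proj", "o_proj",
        "gate_proj", "up_proj", "down_proj",
    ],
    task_type="CAUSAL_LM",
)
model = get_peft_model(model, lora_cfg)
model.print_trainable_parameters()  # only the adapter weights require grads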