zakariarada committed
Commit 1e7aa72
1 Parent(s): 9f6f6b6

Upload cfg.yaml

Files changed (1)
cfg.yaml +120 -0
cfg.yaml ADDED
@@ -0,0 +1,120 @@
architecture:
  backbone_dtype: int4
  gradient_checkpointing: true
  intermediate_dropout: 0.0
  pretrained: true
  pretrained_weights: ''
augmentation:
  neftune_noise_alpha: 0.0
  random_parent_probability: 0.0
  skip_parent_probability: 0.0
  token_mask_probability: 0.0
dataset:
  add_eos_token_to_answer: true
  add_eos_token_to_prompt: true
  add_eos_token_to_system: true
  answer_column: chosen
  chatbot_author: H2O.ai
  chatbot_name: h2oGPT
  data_sample: 1.0
  data_sample_choice:
  - Train
  - Validation
  limit_chained_samples: true
  mask_prompt_labels: true
  only_last_answer: false
  parent_id_column: None
  personalize: false
  prompt_column:
  - question
  prompt_column_separator: \n\n
  rejected_answer_column: rejected
  rejected_prompt_column: None
  system_column: system
  text_answer_separator: <|answer|>
  text_prompt_start: <|prompt|>
  text_system_start: <|system|>
  train_dataframe: /home/hunterx/h2o-llmstudio/data/user/dpo/train.pq
  validation_dataframe: None
  validation_size: 0.01
  validation_strategy: automatic
environment:
  compile_model: false
  deepspeed_allgather_bucket_size: 1000000
  deepspeed_method: ZeRO2
  deepspeed_reduce_bucket_size: 1000000
  deepspeed_stage3_param_persistence_threshold: 1000000
  deepspeed_stage3_prefetch_bucket_size: 1000000
  find_unused_parameters: false
  gpus:
  - '0'
  huggingface_branch: main
  mixed_precision: true
  mixed_precision_dtype: bfloat16
  number_of_workers: 8
  seed: -1
  trust_remote_code: true
  use_deepspeed: false
experiment_name: ambitious-grasshopper
llm_backbone: h2oai/h2o-danube3-500m-chat
logging:
  log_all_ranks: false
  log_step_size: absolute
  logger: None
  neptune_project: ''
  wandb_entity: ''
  wandb_project: ''
output_directory: /home/hunterx/h2o-llmstudio/output/user/ambitious-grasshopper/
prediction:
  batch_size_inference: 0
  do_sample: false
  max_length_inference: 256
  max_time: 0.0
  metric: BLEU
  metric_gpt_model: gpt-3.5-turbo-0301
  metric_gpt_template: general
  min_length_inference: 2
  num_beams: 1
  num_history: 4
  repetition_penalty: 1.0
  stop_tokens: ''
  temperature: 0.0
  top_k: 0
  top_p: 1.0
problem_type: text_dpo_modeling
tokenizer:
  add_prompt_answer_tokens: false
  max_length: 512
  padding_quantile: 1.0
  tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
training:
  attention_implementation: auto
  batch_size: 2
  beta: 0.05
  differential_learning_rate: 1.0e-05
  differential_learning_rate_layers: []
  drop_last_batch: true
  epochs: 1
  evaluate_before_training: false
  evaluation_epochs: 1.0
  freeze_layers: []
  grad_accumulation: 1
  gradient_clip: 10.0
  learning_rate: 0.0001
  lora: true
  lora_alpha: 16
  lora_dropout: 0.05
  lora_r: 4
  lora_target_modules: ''
  lora_unfreeze_layers: []
  loss_function: DPOLoss
  min_learning_rate_ratio: 0.0
  optimizer: AdamW
  save_checkpoint: last
  schedule: Cosine
  simpo_gamma: 1.0
  train_validation_data: false
  use_dora: false
  use_rslora: false
  warmup_epochs: 0.0
  weight_decay: 0.0
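
For reference, a minimal Python sketch of how a config like this parses: nested YAML sections become nested dicts, so the DPO and LoRA hyperparameters are read off with plain key lookups. This assumes PyYAML is installed and the file is saved locally as cfg.yaml; it is an illustration, not part of the commit or of the H2O LLM Studio API.

    # Sketch: load the uploaded config and inspect a few settings.
    # Assumes PyYAML and a local copy of cfg.yaml (hypothetical path).
    import yaml

    with open("cfg.yaml") as f:
        cfg = yaml.safe_load(f)

    # Nested keys mirror the sections above.
    print(cfg["llm_backbone"])              # h2oai/h2o-danube3-500m-chat
    print(cfg["problem_type"])              # text_dpo_modeling
    print(cfg["training"]["beta"])          # 0.05 (DPO beta)
    print(cfg["training"]["lora_r"])        # 4
    print(cfg["dataset"]["answer_column"])  # chosen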