kcheung committed
Commit
297113b
1 Parent(s): b4d7a9b

Upload cfg.yaml

Files changed (1)
cfg.yaml +98 -0
cfg.yaml ADDED
@@ -0,0 +1,98 @@
+ architecture:
+     backbone_dtype: float16
+     force_embedding_gradients: false
+     gradient_checkpointing: true
+     intermediate_dropout: 0.0
+     pretrained: true
+     pretrained_weights: ''
+ augmentation:
+     random_parent_probability: 0.0
+     skip_parent_probability: 0.0
+     token_mask_probability: 0.0
+ dataset:
+     add_eos_token_to_answer: true
+     add_eos_token_to_prompt: true
+     answer_column: "Answer\r"
+     chatbot_author: H2O.ai
+     chatbot_name: h2oGPT
+     data_sample: 1.0
+     data_sample_choice:
+     - Train
+     - Validation
+     limit_chained_samples: false
+     mask_prompt_labels: true
+     parent_id_column: None
+     personalize: false
+     prompt_column:
+     - Question
+     text_answer_separator: <|answer|>
+     text_prompt_start: <|prompt|>
+     train_dataframe: data/user/fine_tune_QA/fine_tune_QA.csv
+     validation_dataframe: None
+     validation_size: 0.01
+     validation_strategy: automatic
+ environment:
+     compile_model: false
+     find_unused_parameters: false
+     gpus:
+     - '0'
+     - '1'
+     - '2'
+     - '3'
+     mixed_precision: true
+     number_of_workers: 48
+     seed: -1
+     trust_remote_code: true
+     use_fsdp: false
+ experiment_name: text_gen_QA_001
+ llm_backbone: NousResearch/Nous-Hermes-Llama2-13b
+ logging:
+     logger: None
+     neptune_project: ''
+     number_of_texts: 10
+ output_directory: output/user/text_gen_QA_001/
+ prediction:
+     batch_size_inference: 0
+     do_sample: false
+     max_length_inference: 256
+     metric: BLEU
+     min_length_inference: 2
+     num_beams: 1
+     num_history: 2
+     repetition_penalty: 1.2
+     stop_tokens: ''
+     temperature: 0.3
+     top_k: 0
+     top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+     add_prefix_space: false
+     add_prompt_answer_tokens: false
+     max_length: 1632
+     max_length_answer: 1168
+     max_length_prompt: 1168
+     padding_quantile: 1.0
+     use_fast: true
+ training:
+     batch_size: 4
+     differential_learning_rate: 1.0e-05
+     differential_learning_rate_layers: []
+     drop_last_batch: true
+     epochs: 20
+     evaluate_before_training: false
+     evaluation_epochs: 1.0
+     grad_accumulation: 1
+     gradient_clip: 0.0
+     learning_rate: 0.0001
+     lora: true
+     lora_alpha: 16
+     lora_dropout: 0.05
+     lora_r: 4
+     lora_target_modules: ''
+     loss_function: TokenAveragedCrossEntropy
+     optimizer: AdamW
+     save_best_checkpoint: false
+     schedule: Cosine
+     train_validation_data: false
+     warmup_epochs: 0.0
+     weight_decay: 0.0
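
For reference, this file can be inspected outside of H2O LLM Studio with plain YAML tooling. A minimal sketch, assuming PyYAML is installed and cfg.yaml sits in the working directory; all file names and keys below come straight from the diff above:

import yaml

# Load the experiment configuration uploaded in this commit.
with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

# A few of the settings that define this experiment.
print(cfg["llm_backbone"])               # NousResearch/Nous-Hermes-Llama2-13b
print(cfg["problem_type"])               # text_causal_language_modeling
print(cfg["training"]["lora_r"])         # 4
print(cfg["training"]["learning_rate"])  # 0.0001

Note that with batch_size: 4, grad_accumulation: 1, and four GPUs listed under environment.gpus, the effective global batch size is 4 x 4 x 1 = 16, assuming data-parallel training across all listed GPUs.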