ilu000 committed
Commit
cc5c985
1 Parent(s): e048ea5

Upload cfg.yaml

Files changed (1)
  1. cfg.yaml +93 -0
cfg.yaml ADDED
@@ -0,0 +1,93 @@
+ architecture:
+     backbone_dtype: float16
+     force_embedding_gradients: false
+     gradient_checkpointing: true
+     intermediate_dropout: 0.0
+     pretrained: true
+     pretrained_weights: ''
+ augmentation:
+     random_parent_probability: 0.0
+     skip_parent_probability: 0.0
+     token_mask_probability: 0.0
+ dataset:
+     add_eos_token_to_answer: true
+     add_eos_token_to_prompt: true
+     answer_column: output
+     data_sample: 1.0
+     data_sample_choice:
+     - Train
+     - Validation
+     mask_prompt_labels: true
+     parent_id_column: parent_id
+     prompt_column:
+     - instruction
+     text_answer_separator: <|answer|>
+     text_prompt_start: <|prompt|>
+     train_dataframe: data/user/oasst/train_full_allrank.pq
+     validation_dataframe: data/user/oasst/gpt4_val_v0.csv
+     validation_size: 0.01
+     validation_strategy: custom
+ environment:
+     compile_model: false
+     find_unused_parameters: false
+     gpus:
+     - '0'
+     - '1'
+     - '2'
+     mixed_precision: true
+     number_of_workers: 8
+     seed: -1
+     trust_remote_code: false
+     use_fsdp: false
+ experiment_name: h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt
+ llm_backbone: openlm-research/open_llama_7b_400bt_preview
+ logging:
+     logger: Neptune
+     neptune_project: Zoo/h2o-llm
+     number_of_texts: 10
+ output_directory: output/user/h2ogpt-gm-oasst1-en-1024-open-llama-7b-preview-400bt/
+ prediction:
+     batch_size_inference: 0
+     do_sample: false
+     max_length_inference: 512
+     metric: GPT3.5
+     min_length_inference: 2
+     num_beams: 1
+     num_history: 2
+     repetition_penalty: 1.2
+     stop_tokens: ''
+     temperature: 0.3
+     top_k: 0
+     top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+     add_prefix_space: false
+     add_prompt_answer_tokens: false
+     max_length: 1024
+     max_length_answer: 512
+     max_length_prompt: 512
+     padding_quantile: 1.0
+     use_fast: false
+ training:
+     batch_size: 3
+     differential_learning_rate: 1.0e-05
+     differential_learning_rate_layers: []
+     drop_last_batch: true
+     epochs: 2
+     evaluate_before_training: false
+     evaluation_epochs: 0.5
+     grad_accumulation: 1
+     gradient_clip: 0.0
+     learning_rate: 0.0001
+     lora: true
+     lora_alpha: 32
+     lora_dropout: 0.05
+     lora_r: 16
+     lora_target_modules: ''
+     loss_function: CrossEntropy
+     optimizer: AdamW
+     save_best_checkpoint: false
+     schedule: Cosine
+     train_validation_data: false
+     warmup_epochs: 0.0
+     weight_decay: 0.0
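
Not part of the commit, but as a reading aid: a minimal Python sketch of how the LoRA hyperparameters under training (lora_r, lora_alpha, lora_dropout) could be mirrored in a peft.LoraConfig after loading this file with PyYAML. The file path and the target_modules choice are assumptions; lora_target_modules is left empty in the config, so H2O LLM Studio would presumably pick the target modules for the backbone itself.

# Illustrative sketch only; not part of this commit or of H2O LLM Studio's code.
import yaml
from peft import LoraConfig, TaskType

with open("cfg.yaml") as f:  # path assumed; adjust to wherever the file lives
    cfg = yaml.safe_load(f)

train = cfg["training"]
lora_config = LoraConfig(
    task_type=TaskType.CAUSAL_LM,
    r=train["lora_r"],                    # 16
    lora_alpha=train["lora_alpha"],       # 32
    lora_dropout=train["lora_dropout"],   # 0.05
    # cfg leaves lora_target_modules empty; q_proj/v_proj is a common choice
    # for LLaMA-style backbones and is assumed here purely for illustration.
    target_modules=["q_proj", "v_proj"],
)
print(cfg["llm_backbone"], lora_config)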