overenginar committed
Commit 19d0ef1
1 Parent(s): 2957234

Upload cfg.yaml

Files changed (1)
  1. cfg.yaml +102 -0
cfg.yaml ADDED
@@ -0,0 +1,102 @@
+architecture:
+  backbone_dtype: int4
+  force_embedding_gradients: false
+  gradient_checkpointing: true
+  intermediate_dropout: 0.0
+  pretrained: true
+  pretrained_weights: ''
+augmentation:
+  random_parent_probability: 0.0
+  skip_parent_probability: 0.0
+  token_mask_probability: 0.0
+dataset:
+  add_eos_token_to_answer: true
+  add_eos_token_to_prompt: true
+  add_eos_token_to_system: true
+  answer_column: output
+  chatbot_author: H2O.ai
+  chatbot_name: h2oGPT
+  data_sample: 1.0
+  data_sample_choice:
+  - Train
+  - Validation
+  limit_chained_samples: false
+  mask_prompt_labels: true
+  parent_id_column: parent_id
+  personalize: false
+  prompt_column:
+  - instruction
+  system_column: None
+  text_answer_separator: <|answer|>
+  text_prompt_start: <|prompt|>
+  text_system_start: <|system|>
+  train_dataframe: /workspace/data/user/oasst/train_full.pq
+  validation_dataframe: None
+  validation_size: 0.01
+  validation_strategy: automatic
+environment:
+  compile_model: false
+  find_unused_parameters: false
+  gpus:
+  - '0'
+  - '1'
+  - '2'
+  - '3'
+  huggingface_branch: main
+  mixed_precision: true
+  number_of_workers: 8
+  seed: 42
+  trust_remote_code: true
+  use_fsdp: false
+experiment_name: open_llama_7b_oasst
+llm_backbone: openlm-research/open_llama_7b
+logging:
+  logger: Neptune
+  neptune_project: overenginar/h2ollmstudio
+output_directory: /workspace/output/user/open_llama_7b_oasst/
+prediction:
+  batch_size_inference: 0
+  do_sample: false
+  max_length_inference: 256
+  metric: Perplexity
+  metric_gpt_model: gpt-3.5-turbo-0301
+  min_length_inference: 2
+  num_beams: 1
+  num_history: 4
+  repetition_penalty: 1.2
+  stop_tokens: ''
+  temperature: 0.3
+  top_k: 0
+  top_p: 1.0
+problem_type: text_causal_language_modeling
+tokenizer:
+  add_prefix_space: false
+  add_prompt_answer_tokens: false
+  max_length: 512
+  max_length_answer: 256
+  max_length_prompt: 256
+  padding_quantile: 1.0
+  use_fast: true
+training:
+  batch_size: 2
+  differential_learning_rate: 1.0e-05
+  differential_learning_rate_layers: []
+  drop_last_batch: true
+  epochs: 1
+  evaluate_before_training: true
+  evaluation_epochs: 1.0
+  grad_accumulation: 1
+  gradient_clip: 0.0
+  learning_rate: 0.0001
+  lora: true
+  lora_alpha: 32
+  lora_dropout: 0.05
+  lora_r: 32
+  lora_target_modules: ''
+  loss_function: TokenAveragedCrossEntropy
+  optimizer: AdamW8bit
+  save_best_checkpoint: false
+  schedule: Cosine
+  train_validation_data: false
+  warmup_epochs: 0.0
+  weight_decay: 0.0
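
The `dataset:` block above controls how each instruction/answer pair is assembled into a single training string via `text_prompt_start`, `text_answer_separator`, and the `add_eos_token_*` flags. Below is a minimal sketch of reading this file with PyYAML and applying those separators; the `format_sample` helper and the `</s>` EOS string (OpenLLaMA's default) are illustrative assumptions, not part of the H2O LLM Studio API.

    # Minimal sketch: load cfg.yaml and rebuild the prompt template it describes.
    # Assumes PyYAML is installed; format_sample and "</s>" are hypothetical.
    import yaml

    with open("cfg.yaml") as f:
        cfg = yaml.safe_load(f)

    ds = cfg["dataset"]

    def format_sample(instruction: str, answer: str, eos: str = "</s>") -> str:
        """Wrap a prompt/answer pair in the separators defined under dataset:."""
        parts = [ds["text_prompt_start"], instruction]
        if ds["add_eos_token_to_prompt"]:
            parts.append(eos)
        parts += [ds["text_answer_separator"], answer]
        if ds["add_eos_token_to_answer"]:
            parts.append(eos)
        return "".join(parts)

    # With this config, the output looks like:
    # <|prompt|>...question...</s><|answer|>...response...</s>
    print(format_sample("What does lora_r control?",
                        "The rank of the low-rank adapter matrices."))

With `mask_prompt_labels: true`, only the tokens after `<|answer|>` would contribute to the loss during fine-tuning.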