Stevross committed
Commit
7cf52d9
1 Parent(s): d2849c6

Upload cfg.yaml

Files changed (1):
  1. cfg.yaml +113 -0
cfg.yaml ADDED
@@ -0,0 +1,113 @@
+ architecture:
+     backbone_dtype: float16
+     force_embedding_gradients: false
+     gradient_checkpointing: true
+     intermediate_dropout: 0.0
+     pretrained: true
+     pretrained_weights: output/user/Astrid-LLama-3B.1.1.1/checkpoint.pth
+ augmentation:
+     random_parent_probability: 0.0
+     skip_parent_probability: 0.0
+     token_mask_probability: 0.0
+ dataset:
+     add_eos_token_to_answer: true
+     add_eos_token_to_prompt: true
+     answer_column: "response\r"
+     chatbot_author: Paix.Cloud
+     chatbot_name: Astrid
+     data_sample: 1.0
+     data_sample_choice:
+     - Train
+     - Validation
+     limit_chained_samples: false
+     mask_prompt_labels: true
+     parent_id_column: None
+     personalize: true
+     prompt_column:
+     - instruction
+     - input
+     text_answer_separator: <|answer|>
+     text_prompt_start: <|prompt|>
+     train_dataframe: data/user/gpt4-dataset/gpt4-instruct-dedupe-only-dataset.csv
+     validation_dataframe: None
+     validation_size: 0.01
+     validation_strategy: automatic
+ environment:
+     compile_model: false
+     find_unused_parameters: false
+     gpus:
+     - '0'
+     huggingface_branch: main
+     mixed_precision: true
+     number_of_workers: 8
+     seed: -1
+     trust_remote_code: true
+     use_fsdp: false
+ experiment_name: Astrid-LLama-3B.1.1.1.1
+ llm_backbone: openlm-research/open_llama_3b
+ logging:
+     logger: Neptune
+     neptune_project: llmstudio
+     number_of_texts: 10
+ output_directory: output/user/Astrid-LLama-3B.1.1.1.1/
+ prediction:
+     batch_size_inference: 0
+     do_sample: false
+     max_length_inference: 256
+     metric: BLEU
+     metric_gpt_model: gpt-3.5-turbo-0301
+     min_length_inference: 2
+     num_beams: 1
+     num_history: 2
+     repetition_penalty: 1.2
+     stop_tokens: ''
+     temperature: 0.3
+     top_k: 0
+     top_p: 1.0
+ problem_type: text_causal_language_modeling
+ tokenizer:
+     add_prefix_space: false
+     add_prompt_answer_tokens: false
+     max_length: 512
+     max_length_answer: 256
+     max_length_prompt: 256
+     padding_quantile: 1.0
+     use_fast: false
+ training:
+     adaptive_kl_control: true
+     advantages_gamma: 0.99
+     advantages_lambda: 0.95
+     batch_size: 18
+     differential_learning_rate: 1.0e-05
+     differential_learning_rate_layers: []
+     drop_last_batch: true
+     epochs: 17
+     evaluate_before_training: false
+     evaluation_epochs: 1.0
+     grad_accumulation: 1
+     gradient_clip: 0.0
+     initial_kl_coefficient: 0.2
+     kl_horizon: 10000
+     kl_target: 6.0
+     learning_rate: 0.0001
+     lora: true
+     lora_alpha: 16
+     lora_dropout: 0.05
+     lora_r: 4
+     lora_target_modules: ''
+     loss_function: TokenAveragedCrossEntropy
+     offload_reward_model: false
+     optimizer: AdamW
+     ppo_batch_size: 1
+     ppo_clip_policy: 0.2
+     ppo_clip_value: 0.2
+     ppo_epochs: 4
+     ppo_generate_temperature: 1.0
+     reward_model: OpenAssistant/reward-model-deberta-v3-large-v2
+     save_best_checkpoint: false
+     scaling_factor_value_loss: 0.1
+     schedule: Cosine
+     train_validation_data: false
+     use_rlhf: false
+     warmup_epochs: 0.0
+     weight_decay: 0.0
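
For reference, a config like this is plain YAML and can be loaded and inspected programmatically. The sketch below is a minimal example, not part of this repo: it assumes PyYAML is installed, and the keys it reads are taken from the file above.

# Minimal sketch: load the uploaded cfg.yaml and read a few of its fields.
# Assumes PyYAML is available (pip install pyyaml); not shipped with this repo.
import yaml

with open("cfg.yaml") as f:
    cfg = yaml.safe_load(f)

# Top-level sections mirror the file: architecture, dataset, training, ...
print(cfg["llm_backbone"])                  # openlm-research/open_llama_3b
print(cfg["training"]["learning_rate"])     # 0.0001
print(cfg["training"]["lora"], cfg["training"]["lora_r"])  # True 4
print(cfg["dataset"]["text_prompt_start"])  # <|prompt|>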