# Source: NVIDIA NeMo repository, commit 365b81f (file size 5,967 bytes).
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# =============================================================================
# NeMo / Megatron GPT model configuration.
# Appears to be a chat SFT + DPO fine-tuning config for a very large GPT
# model (96 layers, hidden 18432, TP=8 x PP=4). The consuming framework is
# presumably NeMo (see `target` at the bottom) — confirm against the training
# script that loads this file.
# =============================================================================
# --- Batching and model parallelism ---
mcore_gpt: true
micro_batch_size: 1
# NOTE(review): top-level global_batch_size (256) differs from
# data.train_ds.global_batch_size (128) — confirm which one the trainer uses.
global_batch_size: 256
tensor_model_parallel_size: 8
pipeline_model_parallel_size: 4
virtual_pipeline_model_parallel_size: null
# --- Transformer architecture ---
encoder_seq_length: 4096
max_position_embeddings: 4096
num_layers: 96
hidden_size: 18432
ffn_hidden_size: 73728
num_attention_heads: 96
init_method_std: 0.0063
use_scaled_init_method: true
# All dropout disabled.
hidden_dropout: 0.0
attention_dropout: 0.0
ffn_dropout: 0.0
kv_channels: null
apply_query_key_layer_scaling: true
normalization: layernorm1p
layernorm_epsilon: 1.0e-05
do_layer_norm_weight_decay: false
make_vocab_size_divisible_by: 128
pre_process: true
post_process: true
persist_layer_norm: true
bias: false
activation: squared-relu
headscale: false
transformer_block_type: pre_ln
openai_gelu: false
normalize_attention_scores: true
# Rotary position embeddings applied to half of each head dimension.
position_embedding_type: rope
rotary_percentage: 0.5
attention_type: multihead
share_embeddings_and_output_weights: false
# Grouped-query attention: 8 key/value groups for the 96 attention heads.
num_query_groups: 8
# --- Tokenizer ---
tokenizer:
  library: sentencepiece
  type: null
  # NOTE(review): both `model` and `tokenizer_model` reference (different)
  # sentencepiece artifacts — verify which one the loader actually uses.
  model: nemo:8223bf8eaa194eb8920af568bb52e2d0_megatron_2.model
  vocab_file: null
  merge_file: null
  tokenizer_model: nemo:eb5528fdec5c4083affa2c97958eeef7_megatron_2.model
  sentencepiece_legacy: false
# --- Mixed precision / loss scaling ---
native_amp_init_scale: 4294967296
native_amp_growth_interval: 1000
hysteresis: 2
fp32_residual_connection: false
fp16_lm_cross_entropy: false
megatron_amp_O2: true
# --- Gradient communication and kernel fusion flags ---
grad_allreduce_chunk_size_mb: 125
grad_div_ar_fusion: true
gradient_accumulation_fusion: false
bias_activation_fusion: false
bias_dropout_add_fusion: false
masked_softmax_fusion: true
# --- Misc runtime settings ---
seed: 1234
resume_from_checkpoint: null
use_cpu_initialization: false
onnx_safe: false
apex_transformer_log_level: 30
gradient_as_bucket_view: false
sync_batch_comm: false
# --- Activation checkpointing / sequence parallelism (disabled here) ---
activations_checkpoint_granularity: null
activations_checkpoint_method: null
activations_checkpoint_num_layers: 1
num_micro_batches_with_partial_activation_checkpoints: null
activations_checkpoint_layers_per_pipeline: null
sequence_parallel: false
# --- Transformer Engine / FP8 (all disabled) ---
transformer_engine: false
fp8: false
fp8_e4m3: false
fp8_hybrid: false
fp8_margin: 0
fp8_interval: 1
fp8_amax_history_len: 1
fp8_amax_compute_algo: most_recent
reduce_amax: true
use_emha: false
# --- Optimizer and LR schedule ---
optim:
  name: distributed_fused_adam
  # NOTE(review): lr (3.002e-07) is barely above sched.min_lr (3.0e-07), so
  # the cosine schedule is nearly flat — confirm this is intentional.
  lr: 3.002e-07
  weight_decay: 0.1
  betas:
  - 0.9
  - 0.98
  sched:
    name: CosineAnnealing
    warmup_steps: 10
    constant_steps: 400
    min_lr: 3.0e-07
  bucket_cap_mb: 200
  overlap_grad_sync: false
  contiguous_grad_buffer: true
precision: bf16-mixed
# --- Data: chat-formatted fine-tuning datasets ---
data:
  chat: true
  # Special tokens delimiting chat turns; they appear verbatim in the
  # prompt_template values below.
  chat_prompt_tokens:
    system_turn_start: <extra_id_0>
    turn_start: <extra_id_1>
    label_start: <extra_id_2>
    # end_of_turn / end_of_name appear to encode a single newline character
    # (multi-line single-quoted scalar with one empty line).
    end_of_turn: '

      '
    end_of_name: '

      '
  sample: true
  num_workers: 2
  dataloader_type: single
  train_ds:
    file_path: /dataset/daring-anteater_commercial.shuf.removelong.jsonl
    global_batch_size: 128
    micro_batch_size: 1
    shuffle: true
    memmap_workers: null
    max_seq_length: 4096
    min_seq_length: 1
    drop_last: true
    concat_sampling_probabilities: null
    label_key: output
    add_eos: false
    add_sep: false
    add_bos: false
    truncation_field: input
    index_mapping_dir: /indexmap_dir
    # Two-turn chat template; {braced} placeholders are presumably filled per
    # sample by the dataset code — same template repeated for validation/test.
    prompt_template: '<extra_id_0>System

      {system message}

      <extra_id_1>User

      {turn 1 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 1 assistant label}

      {turn 1 assistant message}

      <extra_id_1>User

      {turn 2 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 2 assistant label}

      {turn 2 assistant message}

      <extra_id_1>'
    hf_dataset: true
    truncation_method: right
  validation_ds:
    # NOTE(review): validation reads the same file as train_ds — confirm this
    # overlap is intended.
    file_path: /dataset/daring-anteater_commercial.shuf.removelong.jsonl
    names: null
    global_batch_size: 128
    micro_batch_size: 1
    shuffle: false
    memmap_workers: null
    max_seq_length: 4096
    min_seq_length: 1
    drop_last: false
    label_key: output
    add_eos: false
    add_sep: false
    add_bos: false
    write_predictions_to_file: false
    output_file_path_prefix: null
    truncation_field: input
    index_mapping_dir: /indexmap_dir
    prompt_template: '<extra_id_0>System

      {system message}

      <extra_id_1>User

      {turn 1 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 1 assistant label}

      {turn 1 assistant message}

      <extra_id_1>User

      {turn 2 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 2 assistant label}

      {turn 2 assistant message}

      <extra_id_1>'
    tokens_to_generate: 32
    hf_dataset: true
    truncation_method: right
    metric:
      name: loss
      average: null
      num_classes: null
  # test_ds only overrides the prompt template; other settings are not set here.
  test_ds:
    prompt_template: '<extra_id_0>System

      {system message}

      <extra_id_1>User

      {turn 1 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 1 assistant label}

      {turn 1 assistant message}

      <extra_id_1>User

      {turn 2 user message}

      <extra_id_1>Assistant

      <extra_id_2>{turn 2 assistant label}

      {turn 2 assistant message}

      <extra_id_1>'
  # --- Generic dataset settings and data_prefix paths ---
  data_impl: jsonl
  splits_string: null
  seq_length: 4096
  skip_warmup: true
  reset_position_ids: false
  reset_attention_mask: false
  eod_mask_loss: false
  index_mapping_dir: /indexmap_dir
  # NOTE(review): data_prefix points train/validation/test at one identical
  # file, and it differs from the *_ds file_path values above — confirm which
  # source the loader honors.
  data_prefix:
    train:
    - /datasets/v30_benign-walrus_clip153600.jsonl
    validation:
    - /datasets/v30_benign-walrus_clip153600.jsonl
    test:
    - /datasets/v30_benign-walrus_clip153600.jsonl
# --- Loss masking / checkpoint restore ---
answer_only_loss: true
restore_from_path: /models/340B_100p_CT_100B
save_nemo_on_validation_end: true
use_flash_attention: null
pipeline_model_parallel_split_rank: 0
# --- DPO (preference optimization) settings ---
dpo:
  log_prob_forward_micro_batch_size: 2
  ref_policy_kl_penalty: 0.3
  average_log_probs: false
  # Small auxiliary SFT loss mixed into the preference loss.
  sft_loss_coeff: 1.0e-05
  optimize_ref_policy_kl_penalty: false
  preference_loss: reward_rev_dpo
  gt_reward_scale: 1.0
apply_rope_fusion: false
# Fully-qualified class the loader instantiates for this config.
target: nemo.collections.nlp.models.language_modeling.megatron_gpt_model.MegatronGPTModel
nemo_version: 1.22.0