aws_output_bucket: s3://sagemaker-us-east-1-107457652907/experiments/llama.open.7b.300bt.zh.10M.v1.0.seq2048.w16.adamw.NA100.0515.ds

data_dir: null
train_file: /opt/ml/input/data/train
dev_file: null
test_file: null

model:
  _target_: models.llama.LlamaForConditionalGeneration.from_pretrained
  vocab_size: 32001
  pad_token_id: 32000
  use_peft: false
  gradient_checkpointing: true

read_tensor:
  _target_: data.collators.zh_instruct.TextDatasetUnify

extended_vocab: null

collator:
  _target_: data.collators.flan.FlanCollatorOverCollator
  collator: null
  max_seq_length: 2048
  tokenizer: ${model_name_or_path}
  decoder_only: true

num_workers: 4
prefetch_factor: 2
do_preprocess: false

model_name_or_path: /tmp/open-llama-7b-preview-300bt
pretrain: null

exp_name: llama.open.7b.300bt.zh.10M.v1.0.seq2048.w16.adamw.NA100.0515.ds
exp_notes: null
output_dir: /tmp/${exp_name}
resume: null

do_train: true
evaluate_during_training: false
do_eval: false
eval_sub_path: checkpoint-*

per_gpu_train_batch_size: 4
per_gpu_eval_batch_size: 4
learning_rate: 1.0e-05
gradient_accumulation_steps: 128
weight_decay: 0.0
adam_epsilon: 1.0e-06
adam_betas: (0.9, 0.99)
max_grad_norm: 5.0
num_train_epochs: 1
total_dataset_len: -1
max_steps: 0
warmup_proportion: 0.01
warmup_steps: 0

optimizer: null
use_nvlamb: null
bit_training: null

logging_steps: 1
save_best: false
save_steps: 250
eval_steps: 250
ddp_eval: true
no_cuda: false
seed: 42
local_rank: 0

fp16: true
fp16_opt_level: O1
fp16_bfloat16: true

prediction_cfg:
  metric: acc
  measure: 1
  best_checkpoint: null
  best_result: null

eval_forward_fn:
  _target_: general_util.evaluator.DiscriminatorForwardFn

post_process: null

fairscale_config:
  _target_: general_util.fsdp_utils.default_initialize
  fp16: ${fp16}
  move_grads_to_cpu: false
  move_params_to_cpu: false
  flatten_parameters: false

with_lightseq: false

ds_cfg:
  train_micro_batch_size_per_gpu: ${per_gpu_train_batch_size}
  gradient_accumulation_steps: ${gradient_accumulation_steps}
  optimizer:
    type: AdamW
    params:
      lr: ${learning_rate}
      betas:
        - 0.9
        - 0.99
      eps: ${adam_epsilon}
      weight_decay: ${weight_decay}
  scheduler:
    type: WarmupLR
    params:
      warmup_max_lr: ${learning_rate}
      warmup_num_steps: 18
      warmup_type: linear
  gradient_clipping: ${max_grad_norm}
  bf16:
    enabled: ${fp16}
  zero_optimization:
    stage: 1
    contiguous_gradients: true
    overlap_comm: true
    reduce_scatter: true
    reduce_bucket_size: 500000000.0
    allgather_bucket_size: 500000000.0
  steps_per_print: 1024

summary_helper:
  _target_: general_util.tensorboard_helper.WandbWriter
  batch_index_or_keys: null
  outputs_index_or_keys: null

n_gpu: 1
device: cuda:0
train_batch_size: 4
eval_batch_size: null
world_size: 16
world_rank: null
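
# Note (added for clarity, not part of the original run config): under these
# settings the effective global batch size per optimizer step works out to
#   per_gpu_train_batch_size * gradient_accumulation_steps * world_size
#   = 4 * 128 * 16 = 8192 sequences, each up to max_seq_length = 2048 tokens.
# The warmup_num_steps: 18 inside ds_cfg is assumed to have been precomputed
# offline from warmup_proportion and the total number of optimizer steps;
# the config itself does not derive it.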