Question for the author: does your RLHF actor loss actually decrease and converge normally? Could you share some empirically good hyperparameter settings? Thanks in advance.

#20
by hepansls - opened

RLHF training has been a huge headache for me lately: after fewer than 100 steps the model starts producing garbage output.

For RLHF training, the SFT stage matters a great deal. Ideally the dataset format should be exactly the same in the SFT stage and the PPO stage; see the sketch below.
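A minimal sketch of what "exactly the same" means in practice. The template below is hypothetical, not from any particular repo; the point is that one shared formatting function feeds both stages, so the policy never sees an unfamiliar prompt layout at PPO time.

```python
# Hypothetical prompt template -- substitute whatever your SFT data actually uses.
PROMPT_TEMPLATE = "Human: {instruction}\n\nAssistant: "

def build_prompt(instruction: str) -> str:
    """Single source of truth for prompt formatting, reused by both stages."""
    return PROMPT_TEMPLATE.format(instruction=instruction)

# SFT stage: prompt and response are concatenated into one training sequence.
sft_example = build_prompt("What is RLHF?") + "RLHF is reinforcement learning from human feedback..."

# PPO stage: rollouts are generated from prompts built by the same function.
ppo_query = build_prompt("What is RLHF?")
```

A mismatch here (e.g., SFT uses "### Instruction:" headers while PPO feeds raw questions) is a common cause of the policy degenerating within a few dozen steps.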

Could you share some details of your experimental setup and results, such as the actor-critic clipping ranges, the KL penalty weights, and the scheduling strategies? On my side the critic and actor losses will not decrease at the same time; the actor loss just fluctuates.
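For concreteness, here is where those two knobs enter the actor update. This is a generic PPO-for-LM sketch, not the author's code: `clip_eps` and `kl_coef` are the clipping range and KL weight being asked about, and 0.2 / 0.1 are common starting points rather than tuned values.

```python
import torch

def ppo_actor_loss(logprobs, old_logprobs, advantages,
                   ref_logprobs=None, clip_eps=0.2, kl_coef=0.1):
    """Clipped PPO surrogate for the actor, with an optional KL penalty
    against a frozen reference (SFT) model.

    All tensors hold per-token log-probs / advantages for the response.
    """
    # Importance ratio between the current policy and the rollout-time policy.
    ratio = torch.exp(logprobs - old_logprobs)
    # Clipped surrogate objective; negated because we minimize a loss.
    surrogate = torch.min(
        ratio * advantages,
        torch.clamp(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * advantages,
    )
    loss = -surrogate.mean()
    # The KL penalty keeps the actor near the SFT model; many implementations
    # instead subtract this term from the reward before computing advantages.
    if ref_logprobs is not None:
        loss = loss + kl_coef * (logprobs - ref_logprobs).mean()
    return loss
```

If the actor loss only fluctuates, one common check is whether the ratio keeps saturating at the clip boundaries (gradients vanish there), which points at the learning rate or advantage normalization rather than the clip value itself.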

[Screenshot: actor and critic loss curves]
{
  "output_dir": "ppo",
  "overwrite_output_dir": false,
  "do_train": true,
  "do_eval": false,
  "do_predict": false,
  "evaluation_strategy": "no",
  "prediction_loss_only": false,
  "per_device_train_batch_size": 1,
  "per_device_eval_batch_size": 8,
  "per_gpu_train_batch_size": null,
  "per_gpu_eval_batch_size": null,
  "gradient_accumulation_steps": 4,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "learning_rate": 1e-05,
  "weight_decay": 0.0,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-08,
  "max_grad_norm": 1.0,
  "num_train_epochs": 1.0,
  "max_steps": -1,
  "lr_scheduler_type": "cosine",
  "warmup_ratio": 0.0,
  "warmup_steps": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "ppo\\runs\\Jun15_14-02-41_aicg",
  "logging_strategy": "steps",
  "logging_first_step": false,
  "logging_steps": 10,
  "logging_nan_inf_filter": true,
  "save_strategy": "steps",
  "save_steps": 300,
  "save_total_limit": null,
  "save_safetensors": false,
  "save_on_each_node": false,
  "no_cuda": false,
  "use_mps_device": false,
  "seed": 42,
  "data_seed": null,
  "jit_mode_eval": false,
  "use_ipex": false,
  "bf16": false,
  "fp16": false,
  "fp16_opt_level": "O1",
  "half_precision_backend": "auto",
  "bf16_full_eval": false,
  "fp16_full_eval": false,
  "tf32": null,
  "local_rank": 0,
  "ddp_backend": null,
  "tpu_num_cores": null,
  "tpu_metrics_debug": false,
  "debug": [],
  "dataloader_drop_last": false,
  "eval_steps": null,
  "dataloader_num_workers": 0,
  "past_index": -1,
  "run_name": "ppo",
  "disable_tqdm": false,
  "remove_unused_columns": true,
  "label_names": null,
  "load_best_model_at_end": false,
  "metric_for_best_model": null,
  "greater_is_better": null,
  "ignore_data_skip": false,
  "sharded_ddp": [],
  "fsdp": [],
  "fsdp_min_num_params": 0,
  "fsdp_config": {
    "fsdp_min_num_params": 0,
    "xla": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_transformer_layer_cls_to_wrap": null,
  "deepspeed": null,
  "label_smoothing_factor": 0.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "adafactor": false,
  "group_by_length": false,
  "length_column_name": "length",
  "report_to": [
    "tensorboard"
  ],
  "ddp_find_unused_parameters": false,
  "ddp_bucket_cap_mb": null,
  "dataloader_pin_memory": true,
  "skip_memory_metrics": true,
  "use_legacy_prediction_loop": false,
  "push_to_hub": false,
  "resume_from_checkpoint": null,
  "hub_model_id": null,
  "hub_strategy": "every_save",
  "hub_token": "",
  "hub_private_repo": false,
  "gradient_checkpointing": false,
  "include_inputs_for_metrics": false,
  "fp16_backend": "auto",
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": "",
  "mp_parameters": "",
  "auto_find_batch_size": false,
  "full_determinism": false,
  "torchdynamo": null,
  "ray_scope": "last",
  "ddp_timeout": 1800,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "xpu_backend": null,
  "sortish_sampler": false,
  "predict_with_generate": false,
  "generation_max_length": null,
  "generation_num_beams": null,
  "generation_config": null
}
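Note that the dump above is only the Hugging Face TrainingArguments; none of the PPO-specific knobs (clip range, KL coefficient, value-loss weight) live there. If the pipeline sits on top of trl's PPOTrainer, they would be set through PPOConfig instead. The values below are close to trl's historical defaults (field names from the ~0.4-0.8 era API; newer releases renamed several), shown as a starting point, not as tuned settings from this thread.

```python
from trl import PPOConfig  # assumes a ~2023-era trl; field names vary by version

ppo_config = PPOConfig(
    learning_rate=1e-5,    # matches the TrainingArguments dump above
    init_kl_coef=0.2,      # weight of the KL penalty against the SFT reference
    adap_kl_ctrl=True,     # adapt the coefficient toward the target KL
    target=6.0,            # target KL between actor and reference
    cliprange=0.2,         # PPO ratio clip for the actor
    cliprange_value=0.2,   # value clipping for the critic
    vf_coef=0.1,           # weight of the critic (value) loss
    gamma=1.0,             # discount factor
    lam=0.95,              # GAE lambda
    ppo_epochs=4,          # optimization epochs per rollout batch
)
```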

I didn't pay attention to those details; I only noticed that the loss stayed low the whole time.
