{
  "_frozen": true,
  "_n_gpu": 1,
  "adafactor": false,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "adam_epsilon": 1e-06,
  "architectures": [
    "InversionFromLogitsEmbModel"
  ],
  "auto_find_batch_size": false,
  "bf16": true,
  "bf16_full_eval": false,
  "cache_dir": null,
  "cheat_on_train_hypotheses": false,
  "config_name": null,
  "config_overrides": null,
  "corrector_ignore_hypothesis_embedding": false,
  "corrector_model_alias": null,
  "corrector_model_from_pretrained": null,
  "data_seed": null,
  "dataloader_drop_last": false,
  "dataloader_num_workers": 0,
  "dataloader_pin_memory": true,
  "dataset_name": "one_million_instructions",
  "ddp_backend": null,
  "ddp_broadcast_buffers": null,
  "ddp_bucket_cap_mb": null,
  "ddp_find_unused_parameters": true,
  "ddp_timeout": 1800,
  "debug": [],
  "decoder_dropout_disabled": false,
  "deepspeed": null,
  "deepspeed_plugin": null,
  "disable_tqdm": true,
  "dispatch_batches": null,
  "do_eval": false,
  "do_predict": false,
  "do_train": false,
  "embedder_fake_with_zeros": false,
  "embedder_model_api": null,
  "embedder_model_name": "meta-llama/Llama-2-7b-hf",
  "embedder_no_grad": true,
  "embedder_torch_dtype": "bfloat16",
  "embedding_transform_strategy": "repeat",
  "embedding_zero_except_topk": null,
  "embeddings_from_layer_n": null,
  "encoder_dropout_disabled": false,
  "eval_accumulation_steps": null,
  "eval_delay": 0,
  "eval_steps": 3125,
  "evaluation_strategy": "steps",
  "exp_group_name": "",
  "exp_name": "",
  "experiment": "inversion_from_logits_emb",
  "fp16": false,
  "fp16_backend": "auto",
  "fp16_full_eval": false,
  "fp16_opt_level": "O1",
  "freeze_strategy": "none",
  "fsdp": [],
  "fsdp_config": {
    "min_num_params": 0,
    "xla": false,
    "xla_fsdp_grad_ckpt": false
  },
  "fsdp_min_num_params": 0,
  "fsdp_transformer_layer_cls_to_wrap": null,
  "full_determinism": false,
  "gradient_accumulation_steps": 1,
  "gradient_checkpointing": false,
  "gradient_checkpointing_kwargs": null,
  "greater_is_better": false,
  "group_by_length": true,
  "half_precision_backend": "auto",
  "hub_always_push": false,
  "hub_model_id": null,
  "hub_private_repo": false,
  "hub_strategy": "every_save",
  "hub_token": null,
  "ignore_data_skip": false,
  "include_inputs_for_metrics": true,
  "include_tokens_per_second": false,
  "jit_mode_eval": false,
  "label_names": null,
  "label_smoothing_factor": 0.0,
  "learning_rate": 0.0002,
  "length_column_name": "length",
  "load_best_model_at_end": true,
  "local_rank": 0,
  "log_level": "passive",
  "log_level_replica": "warning",
  "log_on_each_node": true,
  "logging_dir": "saves/llama-align-4/runs/Nov17_12-43-44_mosaic-cirrascale-37.reviz.ai2.in",
  "logging_first_step": false,
  "logging_nan_inf_filter": true,
  "logging_steps": 50,
  "logging_strategy": "steps",
  "lr_scheduler_type": "linear",
  "max_eval_samples": 500,
  "max_grad_norm": 1.0,
  "max_seq_length": 64,
  "max_steps": -1,
  "metric_for_best_model": "one_million_instructions_loss",
  "mock_embedder": false,
  "model_name_or_path": "t5-base",
  "model_revision": "main",
  "mp_parameters": "",
  "neftune_noise_alpha": null,
  "no_cuda": false,
  "num_repeat_tokens": 16,
  "num_train_epochs": 200.0,
  "optim": "adamw_torch",
  "optim_args": null,
  "output_dir": "saves/llama-align-4",
  "overwrite_output_dir": false,
  "past_index": -1,
  "per_device_eval_batch_size": 256,
  "per_device_train_batch_size": 256,
  "per_gpu_eval_batch_size": null,
  "per_gpu_train_batch_size": null,
  "prediction_loss_only": false,
  "push_to_hub": false,
  "push_to_hub_model_id": null,
  "push_to_hub_organization": null,
  "push_to_hub_token": null,
  "ray_scope": "last",
  "remove_unused_columns": false,
  "report_to": [],
  "resume_from_checkpoint": null,
  "run_name": "saves/llama-align-4",
  "save_on_each_node": false,
  "save_safetensors": true,
  "save_steps": 500,
  "save_strategy": "steps",
  "save_total_limit": 2,
  "seed": 42,
  "skip_memory_metrics": true,
  "split_batches": false,
  "steps_per_epoch": 500000,
  "suffix_conditioning": false,
  "tf32": null,
  "tokenizer_name": null,
  "torch_compile": false,
  "torch_compile_backend": null,
  "torch_compile_mode": null,
  "torch_dtype": "float32",
  "torchdynamo": null,
  "tpu_metrics_debug": false,
  "tpu_num_cores": null,
  "transformers_version": "4.35.0",
  "use_cpu": false,
  "use_frozen_embeddings_as_input": true,
  "use_ipex": false,
  "use_legacy_prediction_loop": false,
  "use_less_data": -1,
  "use_lora": false,
  "use_mps_device": false,
  "use_wandb": false,
  "warmup_ratio": 0.0,
  "warmup_steps": 12500,
  "weight_decay": 0.0
}
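
A minimal sketch (not part of the original configuration) of how this file could be loaded and inspected with Python's standard json module, assuming it has been saved locally under the hypothetical name config.json:

# Load the training/model configuration above and read a few fields.
# "config.json" is an assumed local path, not a name given by the source.
import json

with open("config.json") as f:
    cfg = json.load(f)

print(cfg["embedder_model_name"])          # "meta-llama/Llama-2-7b-hf"
print(cfg["model_name_or_path"])           # "t5-base"
print(cfg["per_device_train_batch_size"])  # 256
print(cfg["learning_rate"])                # 0.0002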