pszemraj committed
Commit
d37d9b1
1 Parent(s): 15f2cf1

Upload grammar-synthesis-small-WIP-ft3-jflAUG-v5_training_metadata.json

grammar-synthesis-small-WIP-ft3-jflAUG-v5_training_metadata.json ADDED
+ {"output_dir": "/content/drive/MyDrive/Programming/hf-trainer/grammar-synthesis-small-WIP-ft3-jflAUG-v5", "overwrite_output_dir": true, "do_train": false, "do_eval": false, "do_predict": false, "evaluation_strategy": "no", "prediction_loss_only": false, "per_device_train_batch_size": 16, "per_device_eval_batch_size": 16, "per_gpu_train_batch_size": "None", "per_gpu_eval_batch_size": "None", "gradient_accumulation_steps": 32, "eval_accumulation_steps": "None", "eval_delay": 0, "learning_rate": 0.0004, "weight_decay": 0.05, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 0.5, "num_train_epochs": 4, "max_steps": -1, "lr_scheduler_type": "cosine", "warmup_ratio": 0.03, "warmup_steps": 0, "log_level": -1, "log_level_replica": -1, "log_on_each_node": true, "logging_dir": "/content/drive/MyDrive/Programming/hf-trainer/grammar-synthesis-small-WIP-ft3-jflAUG-v5/logs", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 2, "logging_nan_inf_filter": true, "save_strategy": "epoch", "save_steps": 500, "save_total_limit": 1, "save_on_each_node": false, "no_cuda": false, "seed": 42, "data_seed": "None", "jit_mode_eval": false, "use_ipex": false, "bf16": false, "fp16": true, "fp16_opt_level": "O1", "half_precision_backend": "cuda_amp", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": "None", "local_rank": 0, "xpu_backend": "None", "tpu_num_cores": "None", "tpu_metrics_debug": false, "debug": "[]", "dataloader_drop_last": false, "eval_steps": "None", "dataloader_num_workers": 0, "past_index": -1, "run_name": "/content/drive/MyDrive/Programming/hf-trainer/grammar-synthesis-small-WIP-ft3-jflAUG-v5", "disable_tqdm": false, "remove_unused_columns": true, "label_names": "None", "load_best_model_at_end": false, "metric_for_best_model": "None", "greater_is_better": "None", "ignore_data_skip": false, "sharded_ddp": "[]", "fsdp": "[]", "fsdp_min_num_params": 0, "deepspeed": "/content/ds_config_zero2.json", "label_smoothing_factor": 0.0, "optim": "adamw_hf", "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": "['tensorboard']", "ddp_find_unused_parameters": "None", "ddp_bucket_cap_mb": "None", "dataloader_pin_memory": true, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": true, "resume_from_checkpoint": "None", "hub_model_id": "grammar-synthesis-small-WIP-ft3-jflAUG-v5", "hub_strategy": "end", "hub_token": "<HUB_TOKEN>", "hub_private_repo": true, "gradient_checkpointing": true, "include_inputs_for_metrics": false, "fp16_backend": "auto", "push_to_hub_model_id": "None", "push_to_hub_organization": "None", "push_to_hub_token": "<PUSH_TO_HUB_TOKEN>", "_n_gpu": 1, "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": "None", "ray_scope": "last", "sortish_sampler": false, "predict_with_generate": false, "generation_max_length": "None", "generation_num_beams": "None", "train_batch_size": 16, "eval_batch_size": 16, "configs_src": "grammar-synthesis-small-WIP-ft3-jflAUG-v5"}