Model Card for TinyLlama-1.1B-intermediate-step-1195k-token-2.5T (fine-tuned)
Pre-trained model: TinyLlama-1.1B-intermediate-step-1195k-token-2.5T
LoRA: no
Model Details
{ "model_args": { "model_name_or_path": "TinyLlama/TinyLlama-1.1B-intermediate-step-1195k-token-2.5T", "tokenizer_name": "None", "cache_dir": "None", "use_fast_tokenizer": "True", "model_revision": "main", "trust_remote_code": "True", "torch_dtype": "bfloat16", "low_cpu_mem_usage": "False", "hf_flash_attn2": "False", "pytorch_flash_attn1": "False", "lora": "False", "lora_rank": "6", "lora_alpha": "12", "lora_dropout": "0.05", "lora_target_module": "['q_proj', 'v_proj', 'k_proj']" }, "training_args": { "output_dir": "runs/results/residual_trainings_hub_24_07", "overwrite_output_dir": true, "do_train": true, "do_eval": true, "do_predict": false, "eval_strategy": "epoch", "prediction_loss_only": false, "per_device_train_batch_size": 4, "per_device_eval_batch_size": 64, "per_gpu_train_batch_size": null, "per_gpu_eval_batch_size": null, "gradient_accumulation_steps": 8, "eval_accumulation_steps": null, "eval_delay": 0, "torch_empty_cache_steps": null, "learning_rate": 2e-05, "weight_decay": 0.1, "adam_beta1": 0.9, "adam_beta2": 0.999, "adam_epsilon": 1e-08, "max_grad_norm": 1.0, "num_train_epochs": 7, "max_steps": null, "lr_scheduler_type": "cosine", "lr_scheduler_kwargs": {}, "warmup_ratio": 0.0, "warmup_steps": 100, "log_level": "passive", "log_level_replica": "warning", "log_on_each_node": true, "logging_dir": "runs/tensorboard", "logging_strategy": "steps", "logging_first_step": false, "logging_steps": 500, "logging_nan_inf_filter": true, "save_strategy": "epoch", "save_steps": 500, "save_total_limit": null, "save_safetensors": true, "save_on_each_node": false, "save_only_model": false, "restore_callback_states_from_checkpoint": false, "no_cuda": false, "use_cpu": false, "use_mps_device": false, "seed": 42, "data_seed": null, "jit_mode_eval": false, "use_ipex": false, "bf16": true, "fp16": false, "fp16_opt_level": "O1", "half_precision_backend": "auto", "bf16_full_eval": false, "fp16_full_eval": false, "tf32": null, "local_rank": 0, "ddp_backend": null, 
"tpu_num_cores": null, "tpu_metrics_debug": false, "debug": [], "dataloader_drop_last": false, "eval_steps": null, "dataloader_num_workers": 8, "dataloader_prefetch_factor": null, "past_index": -1, "run_name": "hub_24_07", "disable_tqdm": false, "remove_unused_columns": true, "label_names": null, "load_best_model_at_end": false, "metric_for_best_model": null, "greater_is_better": null, "ignore_data_skip": false, "fsdp": [], "fsdp_min_num_params": 0, "fsdp_config": { "min_num_params": 0, "xla": false, "xla_fsdp_v2": false, "xla_fsdp_grad_ckpt": false }, "fsdp_transformer_layer_cls_to_wrap": null, "accelerator_config": { "split_batches": false, "dispatch_batches": null, "even_batches": true, "use_seedable_sampler": true, "non_blocking": false, "gradient_accumulation_kwargs": null }, "deepspeed": null, "label_smoothing_factor": 0.0, "optim": "adamw_torch", "optim_args": null, "adafactor": false, "group_by_length": false, "length_column_name": "length", "report_to": [ "tensorboard" ], "ddp_find_unused_parameters": null, "ddp_bucket_cap_mb": null, "ddp_broadcast_buffers": null, "dataloader_pin_memory": true, "dataloader_persistent_workers": false, "skip_memory_metrics": true, "use_legacy_prediction_loop": false, "push_to_hub": false, "resume_from_checkpoint": null, "hub_model_id": null, "hub_strategy": "every_save", "hub_token": "", "hub_private_repo": false, "hub_always_push": false, "gradient_checkpointing": false, "gradient_checkpointing_kwargs": null, "include_inputs_for_metrics": false, "eval_do_concat_batches": true, "fp16_backend": "auto", "evaluation_strategy": null, "push_to_hub_model_id": null, "push_to_hub_organization": null, "push_to_hub_token": "", "mp_parameters": "", "auto_find_batch_size": false, "full_determinism": false, "torchdynamo": null, "ray_scope": "last", "ddp_timeout": 1800, "torch_compile": false, "torch_compile_backend": null, "torch_compile_mode": null, "dispatch_batches": null, "split_batches": null, "include_tokens_per_second": false, 
"include_num_input_tokens_seen": false, "neftune_noise_alpha": null, "optim_target_modules": null, "batch_eval_metrics": false, "eval_on_start": false, "eval_use_gather_object": false }, "data_args": { "dataset_name": "iamtarun/code_instructions_120k_alpaca", "dataset_config_name": "None", "text_column_name": "text", "streaming": "False", "cache_dir_dataset": "None", "overwrite_cache": "False", "validation_split_percentage": "5", "preprocessing_num_workers": "2", "packing": "False", "ift": "True", "ift_template": "alpaca", "train_on_inputs": "True", "train_on_sys_prompt": "True", "chat": "False" } }
Model Sources
- Repository: https://github.com/jzhang38/TinyLlama
- Downloads last month: 136