architecture:
    backbone_dtype: bfloat16
    gradient_checkpointing: true
    intermediate_dropout: 0.0
    pretrained: true
    pretrained_weights: ''
augmentation:
    neftune_noise_alpha: 5.0
    random_parent_probability: 0.0
    skip_parent_probability: 0.0
    token_mask_probability: 0.0
dataset:
    add_eos_token_to_answer: true
    add_eos_token_to_prompt: true
    add_eos_token_to_system: true
    answer_column: ground_truth
    chatbot_author: H2O.ai
    chatbot_name: h2oGPT
    data_sample: 1.0
    data_sample_choice:
    - Train
    - Validation
    limit_chained_samples: false
    mask_prompt_labels: true
    parent_id_column: None
    personalize: false
    prompt_column:
    - prompt
    system_column: None
    text_answer_separator: <|answer|>
    text_prompt_start: <|prompt|>
    text_system_start: <|system|>
    train_dataframe: /home/aaryan/llm_studio_experiments/experiment_50/h2o-llmstudio/data/user/data_for_LLM_Studio_exp_49_backup_classifier_using_LLM_itr_2/train_data_for_LLM_Studio_exp_49_backup_classifier_using_LLM_itr_2.csv
    validation_dataframe: /home/aaryan/llm_studio_experiments/experiment_50/h2o-llmstudio/data/user/data_for_LLM_Studio_exp_49_backup_classifier_using_LLM_itr_2/validation_data_for_LLM_Studio_exp_49_backup_classifier_using_LLM_itr_2.csv
    validation_size: 0.01
    validation_strategy: custom
environment:
    compile_model: false
    deepspeed_allgather_bucket_size: 1000000
    deepspeed_method: ZeRO2
    deepspeed_reduce_bucket_size: 1000000
    deepspeed_stage3_param_persistence_threshold: 1000000
    deepspeed_stage3_prefetch_bucket_size: 1000000
    find_unused_parameters: false
    gpus:
    - '0'
    - '1'
    huggingface_branch: main
    mixed_precision: false
    mixed_precision_dtype: bfloat16
    number_of_workers: 8
    seed: 77
    trust_remote_code: true
    use_deepspeed: false
experiment_name: experiment_50_backup_classifier_finetuning_itr_2
llm_backbone: microsoft/Phi-3-mini-4k-instruct
logging:
    logger: None
    neptune_project: ''
output_directory: /home/aaryan/llm_studio_experiments/experiment_50/h2o-llmstudio/output/user/experiment_50_backup_classifier_finetuning_itr_2/
prediction:
    batch_size_inference: 2
    do_sample: true
    max_length_inference: 4
    max_time: 0.0
    metric: BLEU
    metric_gpt_model: gpt-3.5-turbo-0301
    metric_gpt_template: general
    min_length_inference: 1
    num_beams: 1
    num_history: 4
    repetition_penalty: 1.2
    stop_tokens: ''
    temperature: 0.3
    top_k: 50
    top_p: 0.95
problem_type: text_causal_language_modeling
tokenizer:
    add_prompt_answer_tokens: false
    max_length: 1216
    padding_quantile: 1.0
    tokenizer_kwargs: '{"use_fast": true, "add_prefix_space": false}'
training:
    batch_size: 4
    differential_learning_rate: 1.0e-05
    differential_learning_rate_layers: []
    drop_last_batch: true
    epochs: 3
    evaluate_before_training: false
    evaluation_epochs: 1.0
    freeze_layers: []
    grad_accumulation: 2
    gradient_clip: 0.0
    learning_rate: 0.0001
    lora: true
    lora_alpha: 32
    lora_dropout: 0.05
    lora_r: 16
    lora_target_modules: gate_up_proj, down_proj, qkv_proj, o_proj
    lora_unfreeze_layers: []
    loss_function: TokenAveragedCrossEntropy
    optimizer: AdamW
    save_checkpoint: best
    schedule: Cosine
    train_validation_data: false
    use_dora: false
    use_flash_attention_2: true
    warmup_epochs: 0.05
    weight_decay: 0.0
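# Note: the training.lora_* settings above correspond directly to a Hugging Face
# PEFT LoraConfig applied to the llm_backbone. The commented Python below is a
# minimal sketch of an equivalent standalone setup, assuming the standard
# transformers/peft APIs; it is an illustration of what this config expresses,
# not the exact code H2O LLM Studio executes internally.
#
#   from transformers import AutoModelForCausalLM
#   from peft import LoraConfig, get_peft_model
#
#   # Load the backbone in bfloat16, mirroring architecture.backbone_dtype
#   # and environment.trust_remote_code above.
#   model = AutoModelForCausalLM.from_pretrained(
#       "microsoft/Phi-3-mini-4k-instruct",
#       torch_dtype="bfloat16",
#       trust_remote_code=True,
#   )
#
#   # Mirror training.lora_r / lora_alpha / lora_dropout / lora_target_modules.
#   peft_config = LoraConfig(
#       r=16,
#       lora_alpha=32,
#       lora_dropout=0.05,
#       target_modules=["gate_up_proj", "down_proj", "qkv_proj", "o_proj"],
#       task_type="CAUSAL_LM",
#   )
#   model = get_peft_model(model, peft_config)
#   model.print_trainable_parameters()  # only the LoRA adapters train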