---
# Fine-tuning run configuration for a causal-LM training script.
# NOTE(review): originally collapsed onto one physical line, which is not
# parseable YAML (plain scalars may not contain ": "); restored to a block
# mapping with all keys and values unchanged.

# Hugging Face model ID used as the starting checkpoint.
base_model_name: meta-llama/Llama-2-7b-hf
batch_size: 4
# Chain-of-thought mode disabled for this run (dataset name suggests a COT
# variant exists — confirm against the training script).
cot: false
# Hugging Face dataset ID.
dataset_name: BENBENBENb/McTest640COT
epochs: 20
# Evaluate once per epoch (Transformers-style `eval_strategy`).
eval_strategy: epoch
learning_rate: 0.0001
logging_steps: 1
# Output directory / hub repo ID for the fine-tuned checkpoint.
output_dir: brettbbb/llama_finetune_mc_20
seed: 42
warmup_steps: 5