---
base_model: unsloth/Llama-3.2-3B-Instruct-bnb-4bit
datasets:
- microsoft/orca-agentinstruct-1M-v1
pipeline_tag: text-generation
library_name: transformers
license: llama3.2
tags:
- unsloth
- transformers
---
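# Usage

A minimal text-generation sketch with 🤗 Transformers (the repo id is the one reported in the raw eval output at the bottom of this card; loading the bnb-4bit weights assumes `bitsandbytes` and `accelerate` are installed):

```python
# Minimal inference sketch; repo id taken from the eval config below.
from transformers import pipeline

model_id = "DevQuasar/analytical_reasoning_r16a32_unsloth-Llama-3.2-3B-Instruct-bnb-4bit"
generator = pipeline("text-generation", model=model_id, device_map="auto")

messages = [
    {"role": "user", "content": "A train leaves at 9:40 and arrives at 11:05. How long is the trip?"},
]
out = generator(messages, max_new_tokens=128)
print(out[0]["generated_text"][-1]["content"])  # assistant reply
```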
# Eval results
| Tasks | Version | Filter | n-shot | Metric |   | Value |   | Stderr |
|---|---|---|---|---|---|---|---|---|
| hellaswag | 1 | none | 0 | acc | ↑ | 0.5141 | ± | 0.0050 |
|  |  | none | 0 | acc_norm | ↑ | 0.6793 | ± | 0.0047 |
| leaderboard_bbh | N/A |  |  |  |  |  |  |  |
| - leaderboard_bbh_boolean_expressions | 1 | none | 3 | acc_norm | ↑ | 0.6040 | ± | 0.0310 |
| - leaderboard_bbh_causal_judgement | 1 | none | 3 | acc_norm | ↑ | 0.5668 | ± | 0.0363 |
| - leaderboard_bbh_date_understanding | 1 | none | 3 | acc_norm | ↑ | 0.4880 | ± | 0.0317 |
| - leaderboard_bbh_disambiguation_qa | 1 | none | 3 | acc_norm | ↑ | 0.3760 | ± | 0.0307 |
| - leaderboard_bbh_formal_fallacies | 1 | none | 3 | acc_norm | ↑ | 0.5400 | ± | 0.0316 |
| - leaderboard_bbh_geometric_shapes | 1 | none | 3 | acc_norm | ↑ | 0.2200 | ± | 0.0263 |
| - leaderboard_bbh_hyperbaton | 1 | none | 3 | acc_norm | ↑ | 0.5640 | ± | 0.0314 |
| - leaderboard_bbh_logical_deduction_five_objects | 1 | none | 3 | acc_norm | ↑ | 0.4560 | ± | 0.0316 |
| - leaderboard_bbh_logical_deduction_seven_objects | 1 | none | 3 | acc_norm | ↑ | 0.4360 | ± | 0.0314 |
| - leaderboard_bbh_logical_deduction_three_objects | 1 | none | 3 | acc_norm | ↑ | 0.4880 | ± | 0.0317 |
| - leaderboard_bbh_movie_recommendation | 1 | none | 3 | acc_norm | ↑ | 0.6360 | ± | 0.0305 |
| - leaderboard_bbh_navigate | 1 | none | 3 | acc_norm | ↑ | 0.6200 | ± | 0.0308 |
| - leaderboard_bbh_object_counting | 1 | none | 3 | acc_norm | ↑ | 0.4120 | ± | 0.0312 |
| - leaderboard_bbh_penguins_in_a_table | 1 | none | 3 | acc_norm | ↑ | 0.3219 | ± | 0.0388 |
| - leaderboard_bbh_reasoning_about_colored_objects | 1 | none | 3 | acc_norm | ↑ | 0.3440 | ± | 0.0301 |
| - leaderboard_bbh_ruin_names | 1 | none | 3 | acc_norm | ↑ | 0.3240 | ± | 0.0297 |
| - leaderboard_bbh_salient_translation_error_detection | 1 | none | 3 | acc_norm | ↑ | 0.3120 | ± | 0.0294 |
| - leaderboard_bbh_snarks | 1 | none | 3 | acc_norm | ↑ | 0.4494 | ± | 0.0374 |
| - leaderboard_bbh_sports_understanding | 1 | none | 3 | acc_norm | ↑ | 0.6040 | ± | 0.0310 |
| - leaderboard_bbh_temporal_sequences | 1 | none | 3 | acc_norm | ↑ | 0.1000 | ± | 0.0190 |
| - leaderboard_bbh_tracking_shuffled_objects_five_objects | 1 | none | 3 | acc_norm | ↑ | 0.1600 | ± | 0.0232 |
| - leaderboard_bbh_tracking_shuffled_objects_seven_objects | 1 | none | 3 | acc_norm | ↑ | 0.1200 | ± | 0.0206 |
| - leaderboard_bbh_tracking_shuffled_objects_three_objects | 1 | none | 3 | acc_norm | ↑ | 0.3440 | ± | 0.0301 |
| - leaderboard_bbh_web_of_lies | 1 | none | 3 | acc_norm | ↑ | 0.5160 | ± | 0.0317 |
| leaderboard_gpqa | N/A |  |  |  |  |  |  |  |
| - leaderboard_gpqa_diamond | 1 | none | 0 | acc_norm | ↑ | 0.2727 | ± | 0.0317 |
| - leaderboard_gpqa_extended | 1 | none | 0 | acc_norm | ↑ | 0.2802 | ± | 0.0192 |
| - leaderboard_gpqa_main | 1 | none | 0 | acc_norm | ↑ | 0.2545 | ± | 0.0206 |
| leaderboard_ifeval | 3 | none | 0 | inst_level_loose_acc | ↑ | 0.5252 | ± | N/A |
|  |  | none | 0 | inst_level_strict_acc | ↑ | 0.4748 | ± | N/A |
|  |  | none | 0 | prompt_level_loose_acc | ↑ | 0.3919 | ± | 0.0210 |
|  |  | none | 0 | prompt_level_strict_acc | ↑ | 0.3420 | ± | 0.0204 |
| leaderboard_math_hard | N/A |  |  |  |  |  |  |  |
| - leaderboard_math_algebra_hard | 2 | none | 4 | exact_match | ↑ | 0.2150 | ± | 0.0235 |
| - leaderboard_math_counting_and_prob_hard | 2 | none | 4 | exact_match | ↑ | 0.0244 | ± | 0.0140 |
| - leaderboard_math_geometry_hard | 2 | none | 4 | exact_match | ↑ | 0.0606 | ± | 0.0208 |
| - leaderboard_math_intermediate_algebra_hard | 2 | none | 4 | exact_match | ↑ | 0.0143 | ± | 0.0071 |
| - leaderboard_math_num_theory_hard | 2 | none | 4 | exact_match | ↑ | 0.0649 | ± | 0.0199 |
| - leaderboard_math_prealgebra_hard | 2 | none | 4 | exact_match | ↑ | 0.1762 | ± | 0.0275 |
| - leaderboard_math_precalculus_hard | 2 | none | 4 | exact_match | ↑ | 0.0519 | ± | 0.0192 |
| leaderboard_mmlu_pro | 0.1 | none | 5 | acc | ↑ | 0.2822 | ± | 0.0041 |
| leaderboard_musr | N/A |  |  |  |  |  |  |  |
| - leaderboard_musr_murder_mysteries | 1 | none | 0 | acc_norm | ↑ | 0.5400 | ± | 0.0316 |
| - leaderboard_musr_object_placements | 1 | none | 0 | acc_norm | ↑ | 0.2344 | ± | 0.0265 |
| - leaderboard_musr_team_allocation | 1 | none | 0 | acc_norm | ↑ | 0.3200 | ± | 0.0296 |
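The table above is standard `lm-evaluation-harness` output. Below is a minimal sketch of re-running one of the task groups through the harness's Python API; the model args and `auto:4` batch size mirror the raw eval config at the bottom of this card, and the task list is just an example:

```python
# Sketch: re-running the MuSR task group with lm-evaluation-harness.
# Model args and batch size mirror the eval config in the raw output below.
import lm_eval

results = lm_eval.simple_evaluate(
    model="hf",
    model_args="pretrained=DevQuasar/analytical_reasoning_r16a32_unsloth-Llama-3.2-3B-Instruct-bnb-4bit",
    tasks=["leaderboard_musr"],  # swap in e.g. "leaderboard_bbh", "leaderboard_ifeval", ...
    batch_size="auto:4",
)
for task, metrics in results["results"].items():
    print(task, metrics.get("acc_norm,none"))
```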
# Framework versions
- unsloth 2024.11.5
- trl 0.12.0
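Putting the card together (the Unsloth 4-bit base model, the orca-agentinstruct dataset, and the unsloth/trl versions listed above), the block below is a hedged sketch of what the fine-tuning setup may have looked like, not the exact recipe: the LoRA rank and alpha (16/32) are inferred only from the `r16a32` tag in the model name, the `analytical_reasoning` split and the chat-template preprocessing are assumptions, and the remaining hyperparameters are illustrative.

```python
# Hedged fine-tuning sketch: Unsloth 4-bit Llama-3.2-3B-Instruct + LoRA + TRL SFTTrainer.
# r/alpha are inferred from "r16a32" in the model name; the split, preprocessing and
# other hyperparameters are assumptions, not documented on this card.
import json

from datasets import load_dataset
from trl import SFTConfig, SFTTrainer
from unsloth import FastLanguageModel

model, tokenizer = FastLanguageModel.from_pretrained(
    "unsloth/Llama-3.2-3B-Instruct-bnb-4bit",
    max_seq_length=2048,  # assumption
    load_in_4bit=True,
)
model = FastLanguageModel.get_peft_model(
    model,
    r=16,           # "r16"
    lora_alpha=32,  # "a32"
    target_modules=["q_proj", "k_proj", "v_proj", "o_proj",
                    "gate_proj", "up_proj", "down_proj"],
)

# Assumption: the "analytical_reasoning" split (suggested by the model name),
# rendered into plain text with the tokenizer's chat template.
dataset = load_dataset("microsoft/orca-agentinstruct-1M-v1", split="analytical_reasoning")

def to_text(example):
    msgs = example["messages"]
    if isinstance(msgs, str):  # messages may be stored as a JSON string
        msgs = json.loads(msgs)
    return {"text": tokenizer.apply_chat_template(msgs, tokenize=False)}

trainer = SFTTrainer(
    model=model,
    tokenizer=tokenizer,
    train_dataset=dataset.map(to_text),
    args=SFTConfig(output_dir="outputs", dataset_text_field="text",
                   per_device_train_batch_size=2, num_train_epochs=1),  # illustrative
)
trainer.train()
```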
# Training HW
- V100
# Raw eval output (leaderboard_musr)
```json
{ "results": { "leaderboard_musr": { " ": " ", "alias": "leaderboard_musr" }, "leaderboard_musr_murder_mysteries": { "alias": " - leaderboard_musr_murder_mysteries", "acc_norm,none": 0.54, "acc_norm_stderr,none": 0.03158465389149902 }, "leaderboard_musr_object_placements": { "alias": " - leaderboard_musr_object_placements", "acc_norm,none": 0.234375, "acc_norm_stderr,none": 0.02652733398834892 }, "leaderboard_musr_team_allocation": { "alias": " - leaderboard_musr_team_allocation", "acc_norm,none": 0.32, "acc_norm_stderr,none": 0.029561724955241033 } }, "group_subtasks": { "leaderboard_musr": [ "leaderboard_musr_murder_mysteries", "leaderboard_musr_object_placements", "leaderboard_musr_team_allocation" ] }, "configs": { "leaderboard_musr_murder_mysteries": { "task": "leaderboard_musr_murder_mysteries", "dataset_path": "TAUR-Lab/MuSR", "test_split": "murder_mysteries", "doc_to_text": "def doc_to_text(doc):\n """\n Convert a doc to text.\n """\n choices = ""\n for i, choice in enumerate(ast.literal_eval(doc["choices"])):\n choices += f"{i+1} - {choice}\n"\n\n text = DOC_TO_TEXT.format(\n narrative=doc["narrative"], question=doc["question"], choices=choices\n )\n\n return text\n", "doc_to_target": "{{answer_choice}}", "doc_to_choice": "{{choices}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 1.0 } }, "leaderboard_musr_object_placements": { "task": "leaderboard_musr_object_placements", "dataset_path": "TAUR-Lab/MuSR", "test_split": "object_placements", "doc_to_text": "def doc_to_text(doc):\n """\n Convert a doc to text.\n """\n choices = ""\n for i, choice in enumerate(ast.literal_eval(doc["choices"])):\n choices += f"{i+1} - {choice}\n"\n\n text = DOC_TO_TEXT.format(\n narrative=doc["narrative"], question=doc["question"], choices=choices\n )\n\n return text\n", "doc_to_target": "{{answer_choice}}", "doc_to_choice": "{{choices}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 1.0 } }, "leaderboard_musr_team_allocation": { "task": "leaderboard_musr_team_allocation", "dataset_path": "TAUR-Lab/MuSR", "test_split": "team_allocation", "doc_to_text": "def doc_to_text(doc):\n """\n Convert a doc to text.\n """\n choices = ""\n for i, choice in enumerate(ast.literal_eval(doc["choices"])):\n choices += f"{i+1} - {choice}\n"\n\n text = DOC_TO_TEXT.format(\n narrative=doc["narrative"], question=doc["question"], choices=choices\n )\n\n return text\n", "doc_to_target": "{{answer_choice}}", "doc_to_choice": "{{choices}}", "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "num_fewshot": 0, "metric_list": [ { "metric": "acc_norm", "aggregation": "mean", "higher_is_better": true } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 1.0 } } }, "versions": { "leaderboard_musr_murder_mysteries": 1.0, "leaderboard_musr_object_placements": 1.0, "leaderboard_musr_team_allocation": 1.0 }, "n-shot": { "leaderboard_musr_murder_mysteries": 0, "leaderboard_musr_object_placements": 0, "leaderboard_musr_team_allocation": 0 }, "higher_is_better": { "leaderboard_musr": 
{ "acc_norm": true }, "leaderboard_musr_murder_mysteries": { "acc_norm": true }, "leaderboard_musr_object_placements": { "acc_norm": true }, "leaderboard_musr_team_allocation": { "acc_norm": true } }, "n-samples": { "leaderboard_musr_murder_mysteries": { "original": 250, "effective": 250 }, "leaderboard_musr_object_placements": { "original": 256, "effective": 256 }, "leaderboard_musr_team_allocation": { "original": 250, "effective": 250 } }, "config": { "model": "hf", "model_args": "pretrained=DevQuasar/analytical_reasoning_r16a32_unsloth-Llama-3.2-3B-Instruct-bnb-4bit", "batch_size": "auto:4", "batch_sizes": [ 16, 16, 16, 32 ], "device": null, "use_cache": "eval_cache", "limit": null, "bootstrap_iters": 100000, "gen_kwargs": null, "random_seed": 0, "numpy_seed": 1234, "torch_seed": 1234, "fewshot_seed": 1234 }, "git_hash": "0230356", "date": 1732986471.4917576, "pretty_env_info": "PyTorch version: 2.5.1+cu124\nIs debug build: False\nCUDA used to build PyTorch: 12.4\nROCM used to build PyTorch: N/A\n\nOS: Debian GNU/Linux 12 (bookworm) (x86_64)\nGCC version: (Debian 12.2.0-14) 12.2.0\nClang version: Could not collect\nCMake version: Could not collect\nLibc version: glibc-2.36\n\nPython version: 3.11.10 (main, Oct 3 2024, 07:29:13) [GCC 11.2.0] (64-bit runtime)\nPython platform: Linux-6.1.0-26-amd64-x86_64-with-glibc2.36\nIs CUDA available: True\nCUDA runtime version: Could not collect\nCUDA_MODULE_LOADING set to: LAZY\nGPU models and configuration: \nGPU 0: NVIDIA GeForce GTX 1050 Ti\nGPU 1: Tesla P40\nGPU 2: Tesla V100-PCIE-32GB\nGPU 3: Tesla V100-PCIE-32GB\n\nNvidia driver version: 535.183.01\ncuDNN version: Could not collect\nHIP runtime version: N/A\nMIOpen runtime version: N/A\nIs XNNPACK available: True\n\nCPU:\nArchitecture: x86_64\nCPU op-mode(s): 32-bit, 64-bit\nAddress sizes: 43 bits physical, 48 bits virtual\nByte Order: Little Endian\nCPU(s): 32\nOn-line CPU(s) list: 0-31\nVendor ID: AuthenticAMD\nModel name: AMD Ryzen Threadripper 1950X 16-Core Processor\nCPU family: 23\nModel: 1\nThread(s) per core: 2\nCore(s) per socket: 16\nSocket(s): 1\nStepping: 1\nFrequency boost: enabled\nCPU(s) scaling MHz: 66%\nCPU max MHz: 3400.0000\nCPU min MHz: 2200.0000\nBogoMIPS: 6786.43\nFlags: fpu vme de pse tsc msr pae mce cx8 apic sep mtrr pge mca cmov pat pse36 clflush mmx fxsr sse sse2 ht syscall nx mmxext fxsr_opt pdpe1gb rdtscp lm constant_tsc rep_good nopl nonstop_tsc cpuid extd_apicid amd_dcm aperfmperf rapl pni pclmulqdq monitor ssse3 fma cx16 sse4_1 sse4_2 movbe popcnt aes xsave avx f16c rdrand lahf_lm cmp_legacy svm extapic cr8_legacy abm sse4a misalignsse 3dnowprefetch osvw skinit wdt tce topoext perfctr_core perfctr_nb bpext perfctr_llc mwaitx cpb hw_pstate ssbd ibpb vmmcall fsgsbase bmi1 avx2 smep bmi2 rdseed adx smap clflushopt sha_ni xsaveopt xsavec xgetbv1 clzero irperf xsaveerptr arat npt lbrv svm_lock nrip_save tsc_scale vmcb_clean flushbyasid decodeassists pausefilter pfthreshold avic v_vmsave_vmload vgif overflow_recov succor smca sev\nVirtualization: AMD-V\nL1d cache: 512 KiB (16 instances)\nL1i cache: 1 MiB (16 instances)\nL2 cache: 8 MiB (16 instances)\nL3 cache: 32 MiB (4 instances)\nNUMA node(s): 1\nNUMA node0 CPU(s): 0-31\nVulnerability Gather data sampling: Not affected\nVulnerability Itlb multihit: Not affected\nVulnerability L1tf: Not affected\nVulnerability Mds: Not affected\nVulnerability Meltdown: Not affected\nVulnerability Mmio stale data: Not affected\nVulnerability Reg file data sampling: Not affected\nVulnerability Retbleed: Mitigation; untrained return 
thunk; SMT vulnerable\nVulnerability Spec rstack overflow: Mitigation; safe RET\nVulnerability Spec store bypass: Mitigation; Speculative Store Bypass disabled via prctl\nVulnerability Spectre v1: Mitigation; usercopy/swapgs barriers and __user pointer sanitization\nVulnerability Spectre v2: Mitigation; Retpolines; IBPB conditional; STIBP disabled; RSB filling; PBRSB-eIBRS Not affected; BHI Not affected\nVulnerability Srbds: Not affected\nVulnerability Tsx async abort: Not affected\n\nVersions of relevant libraries:\n[pip3] numpy==2.1.3\n[pip3] torch==2.5.1\n[pip3] triton==3.1.0\n[conda] numpy 2.1.3 pypi_0 pypi\n[conda] torch 2.5.1 pypi_0 pypi\n[conda] triton 3.1.0 pypi_0 pypi", "transformers_version": "4.46.3", "upper_git_hash": null, "tokenizer_pad_token": [ "<|finetune_right_pad_id|>", "128004" ], "tokenizer_eos_token": [ "<|eot_id|>", "128009" ], "tokenizer_bos_token": [ "<|begin_of_text|>", "128000" ], "eot_token_id": 128009, "max_length": 131072, "task_hashes": { "leaderboard_musr_murder_mysteries": "a696259562ea5c5c09a2613e30526fae1de29f55da9e28e8d7e8a53027e6d330", "leaderboard_musr_object_placements": "3aa8c5e5bc59cd6ba2326269b9f0bf3cee8cba1b4e9e1d1330cf5f1f59ea0dce", "leaderboard_musr_team_allocation": "5a75f135c145ee861a1cf31b63346709ef41b9d542be6a61c5818c210a3797a5" }, "model_source": "hf", "model_name": "DevQuasar/analytical_reasoning_r16a32_unsloth-Llama-3.2-3B-Instruct-bnb-4bit", "model_name_sanitized": "DevQuasar__analytical_reasoning_r16a32_unsloth-Llama-3.2-3B-Instruct-bnb-4bit", "system_instruction": null, "system_instruction_sha": null, "fewshot_as_multiturn": false, "chat_template": null, "chat_template_sha": null, "start_time": 52195.45405349, "end_time": 52407.302247922, "total_evaluation_time_seconds": "211.84819443200104" }
```