{ "results": { "cb": { "acc,none": 0.25, "acc_stderr,none": 0.058387420812114225, "f1,none": 0.2407230196703881, "f1_stderr,none": "N/A", "alias": "cb" } }, "configs": { "cb": { "task": "cb", "group": [ "super-glue-lm-eval-v1" ], "dataset_path": "super_glue", "dataset_name": "cb", "training_split": "train", "validation_split": "validation", "doc_to_text": "{{premise}}\nQuestion: {{hypothesis}}. True, False, or Neither?\nAnswer:", "doc_to_target": "label", "doc_to_choice": [ "True", "False", "Neither" ], "description": "", "target_delimiter": " ", "fewshot_delimiter": "\n\n", "metric_list": [ { "metric": "acc" }, { "metric": "f1", "aggregation": "def cb_multi_fi(items):\n preds, golds = zip(*items)\n preds = np.array(preds)\n golds = np.array(golds)\n f11 = sklearn.metrics.f1_score(y_true=golds == 0, y_pred=preds == 0)\n f12 = sklearn.metrics.f1_score(y_true=golds == 1, y_pred=preds == 1)\n f13 = sklearn.metrics.f1_score(y_true=golds == 2, y_pred=preds == 2)\n avg_f1 = np.mean([f11, f12, f13])\n return avg_f1\n" } ], "output_type": "multiple_choice", "repeats": 1, "should_decontaminate": false, "metadata": { "version": 1.0 } } }, "versions": { "cb": 1.0 }, "n-shot": { "cb": 0 }, "config": { "model": "hf", "model_args": "pretrained=TinyLlama/TinyLlama-1.1B-Chat-v1.0,dtype=bfloat16,trust_remote_code=True", "batch_size": "auto", "batch_sizes": [ 64 ], "device": null, "use_cache": null, "limit": null, "bootstrap_iters": 100000, "gen_kwargs": null }, "git_hash": "62513ca" }